perf record: Prepare reading from multiple evlists in record__mmap_read_all()
tools/perf/builtin-record.c
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "asm/bug.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			switch_output;
	unsigned long long	samples;
};

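/*
 * Write a block of bytes to the perf.data output and account it in
 * bytes_written, which record__mmap_read_evlist() compares against its
 * starting value to decide whether a PERF_RECORD_FINISHED_ROUND event
 * needs to be emitted for the current round.
 */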
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

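/*
 * Find the [*start, *end) byte range of valid events in a backward ring
 * buffer: walk event headers forward from 'head' until the buffer wraps
 * around or a zero-sized header (unused space) is hit.
 */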
static int
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

static int
rb_find_range(struct perf_evlist *evlist,
	      void *data, int mask, u64 head, u64 old,
	      u64 *start, u64 *end)
{
	if (!evlist->backward) {
		*start = old;
		*end = head;
		return 0;
	}

	return backward_rb_find_range(data, mask, head, start, end);
}

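/*
 * Drain one mmap'ed ring buffer into the output file.  The valid range
 * may wrap around the end of the buffer, in which case it is written
 * out in two chunks.  The evlist is passed explicitly in preparation
 * for reading from multiple evlists.
 */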
static int record__mmap_read(struct record *rec, struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (rb_find_range(evlist, data, md->mask, head,
			  old, &start, &end))
		return -1;

	if (start == end)
		return 0;

	rec->samples++;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

		md->prev = head;
		perf_evlist__mmap_consume(evlist, idx);
		return 0;
	}

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_evlist__mmap_consume(evlist, idx);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

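/*
 * Write one AUX area tracing event plus its data (possibly split into
 * two pieces by a buffer wrap) to the output, padding to an 8 byte
 * boundary and, for non-pipe output, recording the file offset in the
 * auxtrace index.
 */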
static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

288
289 static int record__auxtrace_read_snapshot_all(struct record *rec)
290 {
291 int i;
292 int rc = 0;
293
294 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
295 struct auxtrace_mmap *mm =
296 &rec->evlist->mmap[i].auxtrace_mmap;
297
298 if (!mm->base)
299 continue;
300
301 if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
302 rc = -1;
303 goto out;
304 }
305 }
306 out:
307 return rc;
308 }
309
310 static void record__read_auxtrace_snapshot(struct record *rec)
311 {
312 pr_debug("Recording AUX area tracing snapshot\n");
313 if (record__auxtrace_read_snapshot_all(rec) < 0) {
314 trigger_error(&auxtrace_snapshot_trigger);
315 } else {
316 if (auxtrace_record__snapshot_finish(rec->itr))
317 trigger_error(&auxtrace_snapshot_trigger);
318 else
319 trigger_ready(&auxtrace_snapshot_trigger);
320 }
321 }
322
323 #else
324
325 static inline
326 int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
327 struct auxtrace_mmap *mm __maybe_unused)
328 {
329 return 0;
330 }
331
332 static inline
333 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
334 {
335 }
336
337 static inline
338 int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
339 {
340 return 0;
341 }
342
343 #endif
344
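/*
 * mmap the ring buffers for an evlist, turning EPERM into a hint about
 * perf_event_mlock_kb, since exceeding the mlock budget is the usual
 * reason this fails.
 */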
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
			       strerror_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

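/*
 * Open all events in the evlist, falling back to weaker event
 * configurations where possible, then apply tracepoint filters and
 * mmap the ring buffers.
 */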
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace
	 * dso->long_name with the real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * For guest kernels, when processing the record/report
	 * subcommands, we synthesize the module mmap events before the
	 * guest kernel mmap event and trigger a DSO preload, because by
	 * default guest module symbols are loaded from guest kallsyms
	 * instead of /lib/modules/XXX/XXX.  This avoids missing symbols
	 * when the first sampled address falls in a module rather than
	 * in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

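/*
 * Read every per-CPU/per-thread mmap of one evlist, plus its AUX area
 * mmaps when not in snapshot mode, and close the round with a
 * PERF_RECORD_FINISHED_ROUND event if anything was written.
 */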
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	if (!evlist)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &evlist->mmap[i].auxtrace_mmap;

		if (evlist->mmap[i].base) {
			if (record__mmap_read(rec, evlist, i) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

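/*
 * Only rec->evlist is drained for now; the evlist-based helper above is
 * kept separate so that additional evlists (e.g. a backward ring buffer
 * one) can be read here as well later on.
 */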
static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist);
	if (err)
		return err;

	return err;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

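/*
 * Finalize a non-pipe output file: update the data size in the header,
 * process build-ids unless disabled, and rewrite the header in place.
 */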
static void
record__finish_output(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	int fd = perf_data_file__fd(file);

	if (file->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);
}

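/*
 * Build a fake single-entry thread_map on the stack (map_data provides
 * the storage behind the flexible array member) so that map/comm events
 * can be synthesized for the forked workload.
 */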
static int record__synthesize_workload(struct record *rec)
{
	struct {
		struct thread_map map;
		struct thread_map_data map_data;
	} thread_map;

	thread_map.map.nr = 1;
	thread_map.map.map[0].pid = rec->evlist->workload.pid;
	thread_map.map.map[0].comm = NULL;
	return perf_event__synthesize_thread_map(&rec->tool, &thread_map.map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address,
						 rec->opts.proc_map_timeout);
}

static int record__synthesize(struct record *rec);

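/*
 * Close the current output file and switch to a new timestamped one.
 * Used both for SIGUSR2-triggered output switching and, with at_exit
 * set, for the final rename when --timestamp-filename is given.
 */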
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data_file *file = &rec->file;
	int fd, err;

	/* Same size as "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data_file__switch(file, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			file->path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist.  As a result, the newly created
		 * perf.data doesn't contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

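/*
 * Pick a mapped perf_event_mmap_page to read the time conversion
 * parameters from; any of them will do, so use the first mmap'ed one.
 */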
static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	if (rec->evlist && rec->evlist->mmap && rec->evlist->mmap[0].base)
		return rec->evlist->mmap[0].base;
	return NULL;
}

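/*
 * Synthesize the side-band events (attrs and tracing data for pipes,
 * time conversion, auxtrace info, kernel and module mmaps, guest OS
 * events and pre-existing threads) that consumers need before any
 * samples can be resolved.
 */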
static int record__synthesize(struct record *rec)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data_file *file = &rec->file;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data_file__fd(file);
	int err = 0;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
out:
	return err;
}

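/*
 * The main record loop: set up the session, fork and synthesize for the
 * workload if one was given, then alternate between draining the mmaps
 * and polling until the workload exits or the user interrupts.
 */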
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
		       errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	err = record__synthesize(rec);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event *event;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		perf_event__synthesize_comm(tool, event,
					    rec->evlist->workload.pid,
					    process_synthesized_event,
					    machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	for (;;) {
		unsigned long long hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			trigger_ready(&switch_output_trigger);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

static void callchain_debug(struct callchain_param *callchain)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain->record_mode]);

	if (callchain->record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain->dump_size);
}

int record_opts__parse_callchain(struct record_opts *record,
				 struct callchain_param *callchain,
				 const char *arg, bool unset)
{
	int ret;
	callchain->enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain->record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, callchain);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain->record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug(callchain);
	}

	return ret;
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = true;

	if (callchain->record_mode == CALLCHAIN_NONE)
		callchain->record_mode = CALLCHAIN_FP;

	callchain_debug(callchain);
	return 0;
}

static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }

/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

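/*
 * -k/--clockid accepts either a numeric clockid or one of the names
 * above, optionally prefixed with "CLOCK_".
 */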
static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* if it's a number, we're done */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return 0;

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}

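/*
 * -m/--mmap-pages takes "pages[,pages]": the first value sizes the data
 * mmaps, the optional second one the AUX area tracing mmaps.
 */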
static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.proc_map_timeout    = 500,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};

const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

static bool dry_run;

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		   "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
		     "clockid", "clockid to use for events, see clock_gettime()",
		     parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_BOOLEAN(0, "switch-output", &record.switch_output,
		    "Switch output when receiving SIGUSR2"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
	OPT_END()
};

struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");
	}
	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

	if (rec->switch_output)
		rec->timestamp_filename = true;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	if (dry_run)
		return 0;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
		       errbuf);
		return err;
	}

	err = -ENOMEM;

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead.  Still generate buildids if they are required
		 * explicitly using
		 *
		 *  perf record --switch-output --no-no-buildid \
		 *              --no-no-buildid-cache
		 *
		 * The following code is equivalent to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out_symbol_exit;

	/*
	 * We take all buildids when the file contains AUX area
	 * tracing data, because decoding the trace to find the hit
	 * DSOs would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

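/*
 * SIGUSR2 either kicks off an AUX area snapshot or requests an output
 * file switch, depending on which trigger is armed.
 */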
static void snapshot_sig_handler(int sig __maybe_unused)
{
	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (trigger_is_ready(&switch_output_trigger))
		trigger_hit(&switch_output_trigger);
}