perf evlist: Rename for_each() macros to for_each_entry()
tools/perf/builtin-record.c (deliverable/linux.git)
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "asm/bug.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <asm/bug.h>


struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			switch_output;
	unsigned long long	samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

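/*
 * For an overwritable (backward) ring buffer there is no old tail pointer to
 * resume from, so the valid byte range has to be discovered by walking the
 * buffer: start at 'head' and follow the event headers forward until either a
 * zero-sized header is found or a full buffer's worth of data has been
 * covered, then report that range back through *start/*end.
 */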
static int
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

static int
rb_find_range(struct perf_evlist *evlist,
	      void *data, int mask, u64 head, u64 old,
	      u64 *start, u64 *end)
{
	if (!evlist->backward) {
		*start = old;
		*end = head;
		return 0;
	}

	return backward_rb_find_range(data, mask, head, start, end);
}

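/*
 * Flush one mmap'ed ring buffer into the output file. Because the byte range
 * [start, end) can wrap around the end of the buffer, the data may have to be
 * written out in two chunks before the consumer position is updated.
 */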
static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (rb_find_range(rec->evlist, data, md->mask, head,
			  old, &start, &end))
		return -1;

	if (start == end)
		return 0;

	rec->samples++;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

		md->prev = head;
		perf_evlist__mmap_consume(rec->evlist, idx);
		return 0;
	}

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

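/*
 * The two triggers above are driven from signal context (snapshot_sig_handler()
 * for SIGUSR2) and polled from the __cmd_record() main loop: trigger_on()
 * enables a trigger, trigger_ready() arms it, trigger_hit() fires it and
 * trigger_error() parks it after a failure.
 */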
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			if (errno)
				rc = -errno;
			else
				rc = -EINVAL;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * For a guest kernel, when processing the record and report
	 * subcommands, we arrange the module mmaps before the guest kernel
	 * mmap and trigger a dso preload, because by default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

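/*
 * A PERF_RECORD_FINISHED_ROUND event is appended after each pass over the
 * mmaps that wrote at least one event (see record__mmap_read_all() below),
 * giving report-time processing a point up to which buffered events can be
 * flushed and sorted.
 */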
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;

		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, i) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	int fd = perf_data_file__fd(file);

	if (file->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec)
{
	struct {
		struct thread_map map;
		struct thread_map_data map_data;
	} thread_map;

	thread_map.map.nr = 1;
	thread_map.map.map[0].pid = rec->evlist->workload.pid;
	thread_map.map.map[0].comm = NULL;
	return perf_event__synthesize_thread_map(&rec->tool, &thread_map.map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address,
						 rec->opts.proc_map_timeout);
}

static int record__synthesize(struct record *rec);

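/*
 * Finish the current output file and, unless called at exit, switch to a new
 * one named <path>.<timestamp>, resetting the written-bytes accounting and
 * re-synthesizing the tracking events the new file needs.
 */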
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data_file *file = &rec->file;
	int fd, err;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data_file__switch(file, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			file->path, timestamp);

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist, so the newly created perf.data would not
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	if (rec->evlist && rec->evlist->mmap && rec->evlist->mmap[0].base)
		return rec->evlist->mmap[0].base;
	return NULL;
}

static int record__synthesize(struct record *rec)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data_file *file = &rec->file;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data_file__fd(file);
	int err = 0;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
out:
	return err;
}

static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	err = record__synthesize(rec);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event *event;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		perf_event__synthesize_comm(tool, event,
					    rec->evlist->workload.pid,
					    process_synthesized_event,
					    machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	for (;;) {
		unsigned long long hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			trigger_ready(&switch_output_trigger);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

static void callchain_debug(struct callchain_param *callchain)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain->record_mode]);

	if (callchain->record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain->dump_size);
}

int record_opts__parse_callchain(struct record_opts *record,
				 struct callchain_param *callchain,
				 const char *arg, bool unset)
{
	int ret;
	callchain->enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain->record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, callchain);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain->record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug(callchain);
	}

	return ret;
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = true;

	if (callchain->record_mode == CALLCHAIN_NONE)
		callchain->record_mode = CALLCHAIN_FP;

	callchain_debug(callchain);
	return 0;
}

static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

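/*
 * -k/--clockid accepts either a raw clockid number or one of the names in the
 * table above, with an optional "CLOCK_" prefix, e.g.:
 *
 *	perf record -k mono ...
 *	perf record -k CLOCK_MONOTONIC_RAW ...
 */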
static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* if it's a number, we're done */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return 0;

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}

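/*
 * -m/--mmap-pages takes "pages[,pages]": the value before the comma sizes the
 * regular data mmaps, the optional value after it sizes the AUX area tracing
 * mmaps, e.g. "perf record -m 512,128 ...".
 */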
static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.proc_map_timeout    = 500,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};

const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

static bool dry_run;

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
		     "clockid", "clockid to use for events, see clock_gettime()",
		     parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_BOOLEAN(0, "switch-output", &record.switch_output,
		    "Switch output when receive SIGUSR2"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
	OPT_END()
};

struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

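	/*
	 * If perf was built without libbpf or BPF prologue support, the
	 * related options below are marked as not built in (via
	 * set_option_nobuild()) together with the build flag that would
	 * enable them.
	 */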
#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");

	}
	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

	if (rec->switch_output)
		rec->timestamp_filename = true;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	if (dry_run)
		return 0;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
			 errbuf);
		return err;
	}

	err = -ENOMEM;

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildid if they are required
		 * explicitly using
		 *
		 * perf record --signal-trigger --no-no-buildid \
		 *              --no-no-buildid-cache
		 *
		 * Following code equals to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out_symbol_exit;

	/*
	 * We take all buildids when the file contains AUX area tracing data,
	 * because we do not decode the trace (that would take too long).
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

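/*
 * SIGUSR2 handler installed from __cmd_record(): it only flags work by hitting
 * the AUX area snapshot and/or switch-output triggers; the main loop notices
 * the hit and does the actual snapshot reading or output switching.
 */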
static void snapshot_sig_handler(int sig __maybe_unused)
{
	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (trigger_is_ready(&switch_output_trigger))
		trigger_hit(&switch_output_trigger);
}