/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

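/*
 * Per-invocation state of the record command: the output file, the event
 * list and session being recorded, and the options parsed from the command
 * line.
 */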
struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

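/*
 * Drain one mmap'ed ring buffer into the output file.  When the new data
 * wraps around the end of the buffer it is written out in two chunks,
 * then the tail pointer is advanced so the kernel can reuse the space.
 */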
static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 *As for guest kernel when processing subcommand record&report,
	 *we arrange module mmap prior to guest kernel mmap and trigger
	 *a preload dso because default guest module symbols are loaded
	 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 *method is used to avoid symbol missing when the first addr is
	 *in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

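/*
 * PERF_RECORD_FINISHED_ROUND is a synthetic header-only event written after
 * each pass over the mmap buffers; the session layer uses it as a point up
 * to which buffered events can safely be sorted and flushed.
 */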
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

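/*
 * Start with every perf.data header feature enabled, then clear the ones
 * that do not apply to this session (build ids, tracing data, branch stack).
 */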
static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

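/*
 * Main body of 'perf record': create the session, synthesize the metadata
 * events (attrs, kernel/module mmaps, existing threads), enable the counters,
 * then loop draining the mmap buffers until the workload ends or we are
 * interrupted, and finally fix up the on-disk header.
 */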
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[512];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};

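/*
 * Parse the comma separated branch filter names given to -b/-j into a
 * PERF_SAMPLE_BRANCH_* mask, defaulting to "any" when only a privilege
 * level was requested.
 */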
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */

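/*
 * Parse a --call-graph argument: "fp" selects frame pointer unwinding,
 * "dwarf[,<size>]" (when built with dwarf unwind support) requests user
 * stack dumps of the given size for post-processing.
 */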
int record_parse_callchain(const char *arg, struct record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_DWARF_UNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct record_opts *opts)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[opts->call_graph]);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct record_opts *opts = opt->value;
	int ret;

	opts->call_graph_enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;

	opts->call_graph_enabled = !unset;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

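/*
 * perfconfig hook: the record.call-graph variable is parsed with the same
 * code as the --call-graph option; everything else falls through to the
 * default config handling.
 */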
static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.call-graph"))
		return record_parse_callchain(value, &rec->opts);

	return perf_default_config(var, value, cb);
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

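/*
 * Entry point called by the perf command dispatcher: parse the options,
 * validate the target, build the event/thread maps and hand off to
 * __cmd_record().
 */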
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}