/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

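/*
 * Minimal fallback for libcs that lack on_exit(): register up to ATEXIT_MAX
 * handlers that receive the exit status, and dispatch them all from a single
 * atexit() callback; the exit() wrapper macro captures the status code.
 */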
#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

struct perf_record {
	struct perf_tool	tool;
	struct perf_record_opts opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

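/*
 * Append a raw buffer to the output perf.data file, retrying on short writes
 * and accounting the bytes in rec->bytes_written.
 */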
static int perf_record__write(struct perf_record *rec, void *buf, size_t size)
{
	struct perf_data_file *file = &rec->file;

	while (size) {
		ssize_t ret = write(file->fd, buf, size);

		if (ret < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return -1;
		}

		size -= ret;
		buf += ret;

		rec->bytes_written += ret;
	}

	return 0;
}

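/*
 * perf_tool callback: synthesized events (kernel/module mmaps, threads, etc.)
 * are written straight to the output file, just like sampled events.
 */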
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct perf_record *rec = container_of(tool, struct perf_record, tool);
	return perf_record__write(rec, event, event->header.size);
}

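/*
 * Drain one ring buffer: copy everything between the kernel's head and our
 * previous position to the output file, in two chunks when the data wraps
 * around the end of the buffer, then publish the new tail position.
 */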
static int perf_record__mmap_read(struct perf_record *rec,
				   struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (perf_record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (perf_record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

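/*
 * on_exit() handler: if a forked workload is still running, terminate and
 * reap it, then restore the default disposition of the signal that stopped
 * the record session (normal exits and SIGUSR1 are left alone).
 */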
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct perf_record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

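/*
 * Configure and open all events in the evlist, falling back to softer event
 * settings where possible, then apply event filters and mmap the ring
 * buffers.
 */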
static int perf_record__open(struct perf_record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

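/*
 * Re-read the events recorded so far so that build_id__mark_dso_hit_ops can
 * flag the DSOs that were actually hit; only those get their build-ids
 * written into the perf.data header.
 */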
static int process_buildids(struct perf_record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

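/*
 * on_exit() handler for the success path: for file (non-pipe) output, fix up
 * the data size, collect build-ids and rewrite the header before tearing the
 * session down.
 */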
static void perf_record__exit(int status, void *arg)
{
	struct perf_record *rec = arg;
	struct perf_data_file *file = &rec->file;

	if (status != 0)
		return;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record and report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the dso, because by default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address falls in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

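/*
 * PERF_RECORD_FINISHED_ROUND marker, written after a full pass over all ring
 * buffers (only when tracing data is being recorded), so that the
 * event-reordering code on the reporting side knows it can safely flush
 * everything queued up to this point.
 */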
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int perf_record__mmap_read_all(struct perf_record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = perf_record__write(rec, &finished_round_event,
					sizeof(finished_round_event));

out:
	return rc;
}

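/*
 * Start with every header feature enabled, then clear the ones this session
 * cannot provide (build-ids, tracing data, branch stacks).
 */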
static void perf_record__init_features(struct perf_record *rec)
{
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

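/*
 * The record session proper: set up signal handling, the output session and
 * the workload, open the events, synthesize the pre-existing state of the
 * system into the data file, then loop draining the ring buffers until the
 * workload ends or the user interrupts us.
 */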
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
	int err;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	perf_record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &opts->target,
						    argv, file->is_pipe,
						    true);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (perf_record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, evsel_list,
						 file->fd, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	machine = &session->machines.host;

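	/*
	 * Synthesize the state that existed before recording started: event
	 * attributes and tracing data for pipe output, the kernel and module
	 * mmaps, guest machines and the already-running threads of the
	 * monitored target, so that the reporting tools can resolve symbols.
	 */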
	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

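	/*
	 * Main capture loop: flush all ring buffers; if that produced no new
	 * samples, either exit (when a terminating signal was seen) or block
	 * in poll() until one of the event fds becomes readable.
	 */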
	for (;;) {
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);
			disabled = true;
		}
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		file->path,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}

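/*
 * Branch stack filter modes accepted by -b/-j: privilege levels (u/k/hv)
 * plus branch type filters, mapped to PERF_SAMPLE_BRANCH_* bits.
 */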
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER |\
	 PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

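/*
 * Parse the user-supplied DWARF stack dump size: it must be a plain number,
 * is rounded up to a multiple of u64, and is rejected if zero or larger than
 * USHRT_MAX (rounded down to a u64 boundary).
 */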
#ifdef HAVE_LIBUNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */

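/*
 * Parse the --call-graph argument: "fp" for frame-pointer unwinding, or
 * (with libunwind support) "dwarf[,<stack dump size>]" for DWARF-based
 * unwinding of user stack copies.
 */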
int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct perf_record_opts *opts)
{
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct perf_record_opts *opts = opt->value;
	int ret;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct perf_record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally this would be local to cmd_record() and passed to a
 * perf_record__new(), because we need to have access to it in
 * perf_record__exit(), which is called after cmd_record() exits, but since
 * record_options needs to be accessible to builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

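/*
 * Command entry point: parse the options, validate and resolve the target
 * (pid/tid/cpu/uid), build the cpu/thread maps and hand off to
 * __cmd_record() for the actual session.
 */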
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;
	char errbuf[BUFSIZ];

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (perf_record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}