perf tools: Refactor cpumap to hold nr and the map
tools/perf/builtin-record.c
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#define _FILE_OFFSET_BITS 64

#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/cpumap.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

enum write_mode_t {
	WRITE_FORCE,
	WRITE_APPEND
};

static u64 user_interval = ULLONG_MAX;
static u64 default_interval = 0;
static u64 sample_type;

static struct cpu_map *cpus;
static unsigned int page_size;
static unsigned int mmap_pages = 128;
static unsigned int user_freq = UINT_MAX;
static int freq = 1000;
static int output;
static int pipe_output = 0;
static const char *output_name = "perf.data";
static int group = 0;
static int realtime_prio = 0;
static bool raw_samples = false;
static bool sample_id_all_avail = true;
static bool system_wide = false;
static pid_t target_pid = -1;
static pid_t target_tid = -1;
static pid_t *all_tids = NULL;
static int thread_num = 0;
static pid_t child_pid = -1;
static bool no_inherit = false;
static enum write_mode_t write_mode = WRITE_FORCE;
static bool call_graph = false;
static bool inherit_stat = false;
static bool no_samples = false;
static bool sample_address = false;
static bool sample_time = false;
static bool no_buildid = false;
static bool no_buildid_cache = false;

static long samples = 0;
static u64 bytes_written = 0;

static struct pollfd *event_array;

static int nr_poll = 0;
static int nr_cpu = 0;

static int file_new = 1;
static off_t post_processing_offset;

static struct perf_session *session;
static const char *cpu_list;

struct mmap_data {
	void *base;
	unsigned int mask;
	unsigned int prev;
};

static struct mmap_data mmap_array[MAX_NR_CPUS];

static unsigned long mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	long head;

	head = pc->data_head;
	rmb();

	return head;
}

static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	/* mb(); */
	pc->data_tail = tail;
}
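/*
 * Ring-buffer protocol, in outline: the kernel advances data_head
 * after publishing each event, and user space advances data_tail once
 * the events are consumed. The rmb() in mmap_read_head() pairs with
 * the kernel's write barrier so event bytes below head are fully
 * visible before we read them; the mb() left commented out above
 * would order those reads against the data_tail store.
 */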

static void advance_output(size_t size)
{
	bytes_written += size;
}

static void write_output(void *buf, size_t size)
{
	while (size) {
		int ret = write(output, buf, size);

		if (ret < 0)
			die("failed to write");

		size -= ret;
		buf += ret;

		bytes_written += ret;
	}
}

static int process_synthesized_event(event_t *event,
				     struct sample_data *sample __used,
				     struct perf_session *self __used)
{
	write_output(event, event->header.size);
	return 0;
}

static void mmap_read(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data\n");
		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	if (old != head)
		samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		write_output(buf, size);
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	write_output(buf, size);

	md->prev = old;
	mmap_write_tail(md, old);
}
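/*
 * Note the wrap handling above: the buffer size is a power of two, so
 * 'old & md->mask' yields the byte offset. When [old, head) crosses
 * the end of the buffer it is flushed in two chunks - the tail end of
 * the buffer first, then the remainder from offset zero.
 */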

static volatile int done = 0;
static volatile int signr = -1;

static void sig_handler(int sig)
{
	done = 1;
	signr = sig;
}

static void sig_atexit(void)
{
	if (child_pid > 0)
		kill(child_pid, SIGTERM);

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
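/*
 * Restoring SIG_DFL and re-raising the signal lets the shell see the
 * true cause of death instead of a normal exit; SIGUSR1 is skipped
 * because the forked child uses it to report a failed exec (see
 * __cmd_record() below).
 */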

static int group_fd;

static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
{
	struct perf_header_attr *h_attr;

	if (nr < session->header.attrs) {
		h_attr = session->header.attr[nr];
	} else {
		h_attr = perf_header_attr__new(a);
		if (h_attr != NULL)
			if (perf_header__add_attr(&session->header, h_attr) < 0) {
				perf_header_attr__delete(h_attr);
				h_attr = NULL;
			}
	}

	return h_attr;
}

static void create_counter(struct perf_evsel *evsel, int cpu)
{
	char *filter = evsel->filter;
	struct perf_event_attr *attr = &evsel->attr;
	struct perf_header_attr *h_attr;
	int track = !evsel->idx; /* only the first counter needs these */
	int thread_index;
	int ret;
	struct {
		u64 count;
		u64 time_enabled;
		u64 time_running;
		u64 id;
	} read_data;
	/*
	 * Check if parse_single_tracepoint_event has already asked for
	 * PERF_SAMPLE_TIME.
	 *
	 * XXX this is kludgy but short term fix for problems introduced by
	 * eac23d1c that broke 'perf script' by having different sample_types
	 * when using multiple tracepoint events when we use a perf binary
	 * that tries to use sample_id_all on an older kernel.
	 *
	 * We need to move counter creation to perf_session, support
	 * different sample_types, etc.
	 */
	bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING |
			    PERF_FORMAT_ID;

	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (nr_counters > 1)
		attr->sample_type |= PERF_SAMPLE_ID;

	/*
	 * We default some events to a period of 1. But keep it as a weak
	 * assumption that the user can override.
	 */
	if (!attr->sample_period || (user_freq != UINT_MAX &&
				     user_interval != ULLONG_MAX)) {
		if (freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = freq;
		} else {
			attr->sample_period = default_interval;
		}
	}
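	/*
	 * Put differently: an explicit -c <count> keeps a fixed period,
	 * while frequency mode (the default) lets the kernel adjust the
	 * period on the fly to approximate 'freq' samples per second;
	 * PERF_SAMPLE_PERIOD is added so each sample records the period
	 * that was actually in effect.
	 */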

	if (no_samples)
		attr->sample_freq = 0;

	if (inherit_stat)
		attr->inherit_stat = 1;

	if (sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (call_graph)
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (system_wide)
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (sample_id_all_avail &&
	    (sample_time || system_wide || !no_inherit || cpu_list))
		attr->sample_type |= PERF_SAMPLE_TIME;

	if (raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}

	attr->mmap = track;
	attr->comm = track;
	attr->inherit = !no_inherit;
	if (target_pid == -1 && target_tid == -1 && !system_wide) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}
retry_sample_id:
	attr->sample_id_all = sample_id_all_avail ? 1 : 0;

	for (thread_index = 0; thread_index < thread_num; thread_index++) {
try_again:
		FD(evsel, nr_cpu, thread_index) = sys_perf_event_open(attr, all_tids[thread_index], cpu, group_fd, 0);

		if (FD(evsel, nr_cpu, thread_index) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("Permission error - are you root?\n"
				    "\t Consider tweaking"
				    " /proc/sys/kernel/perf_event_paranoid.\n");
			else if (err == ENODEV && cpu_list) {
				die("No such device - did you specify"
				    " an out-of-range profile CPU?\n");
			} else if (err == EINVAL && sample_id_all_avail) {
				/*
				 * Old kernel, no attr->sample_id_all field
				 */
				sample_id_all_avail = false;
				if (!sample_time && !raw_samples && !time_needed)
					attr->sample_type &= ~PERF_SAMPLE_TIME;

				goto retry_sample_id;
			}

			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
			    && attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
			printf("\n");
			error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
			      FD(evsel, nr_cpu, thread_index), strerror(err));

#if defined(__i386__) || defined(__x86_64__)
			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
				die("No hardware sampling interrupt available."
				    " No APIC? If so then you can boot the kernel"
				    " with the \"lapic\" boot parameter to"
				    " force-enable it.\n");
#endif

			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			exit(-1);
		}

		h_attr = get_header_attr(attr, evsel->idx);
		if (h_attr == NULL)
			die("nomem\n");

		if (!file_new) {
			if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
				fprintf(stderr, "incompatible append\n");
				exit(-1);
			}
		}

		if (read(FD(evsel, nr_cpu, thread_index), &read_data, sizeof(read_data)) == -1) {
			perror("Unable to read perf file descriptor");
			exit(-1);
		}

		if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
			pr_warning("Not enough memory to add id\n");
			exit(-1);
		}

		assert(FD(evsel, nr_cpu, thread_index) >= 0);
		fcntl(FD(evsel, nr_cpu, thread_index), F_SETFL, O_NONBLOCK);

		/*
		 * First counter acts as the group leader:
		 */
		if (group && group_fd == -1)
			group_fd = FD(evsel, nr_cpu, thread_index);

		if (evsel->idx || thread_index) {
			struct perf_evsel *first;
			first = list_entry(evsel_list.next, struct perf_evsel, node);
			ret = ioctl(FD(evsel, nr_cpu, thread_index),
				    PERF_EVENT_IOC_SET_OUTPUT,
				    FD(first, nr_cpu, 0));
			if (ret) {
				error("failed to set output: %d (%s)\n", errno,
				      strerror(errno));
				exit(-1);
			}
		} else {
			mmap_array[nr_cpu].prev = 0;
			mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
			mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
				PROT_READ | PROT_WRITE, MAP_SHARED, FD(evsel, nr_cpu, thread_index), 0);
			if (mmap_array[nr_cpu].base == MAP_FAILED) {
				error("failed to mmap with %d (%s)\n", errno, strerror(errno));
				exit(-1);
			}

			event_array[nr_poll].fd = FD(evsel, nr_cpu, thread_index);
			event_array[nr_poll].events = POLLIN;
			nr_poll++;
		}

		if (filter != NULL) {
			ret = ioctl(FD(evsel, nr_cpu, thread_index),
				    PERF_EVENT_IOC_SET_FILTER, filter);
			if (ret) {
				error("failed to set filter with %d (%s)\n", errno,
				      strerror(errno));
				exit(-1);
			}
		}
	}

	if (!sample_type)
		sample_type = attr->sample_type;
}
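/*
 * Design note: only the first event of the first thread on a CPU gets
 * its own mmap ring buffer (mmap_pages data pages plus one header
 * page); every other fd opened for that CPU is redirected into it
 * with PERF_EVENT_IOC_SET_OUTPUT, so a single stream per CPU reaches
 * mmap_read() and only the mmap'ed fd needs a slot in event_array.
 */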

static void open_counters(int cpu)
{
	struct perf_evsel *pos;

	group_fd = -1;

	list_for_each_entry(pos, &evsel_list, node)
		create_counter(pos, cpu);

	nr_cpu++;
}

static int process_buildids(void)
{
	u64 size = lseek(output, 0, SEEK_CUR);

	if (size == 0)
		return 0;

	session->fd = output;
	return __perf_session__process_events(session, post_processing_offset,
					      size - post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}
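/*
 * This second pass re-reads everything written after
 * post_processing_offset and runs it through
 * build_id__mark_dso_hit_ops, so that at exit only the DSOs that
 * actually appeared in samples get their build-ids written into the
 * header.
 */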

static void atexit_header(void)
{
	if (!pipe_output) {
		session->header.data_size += bytes_written;

		if (!no_buildid)
			process_buildids();
		perf_header__write(&session->header, output, true);
		perf_session__delete(session);
		symbol__exit();
	}
}

static void event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_session *psession = data;

	if (machine__is_host(machine))
		return;

	/*
	 * As for guest kernels, when processing the record and report
	 * subcommands we arrange the module mmaps prior to the guest
	 * kernel mmap and trigger a preload of the DSOs, because default
	 * guest module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the
	 * first address falls in a module rather than in the guest
	 * kernel itself.
	 */
	err = event__synthesize_modules(process_synthesized_event,
					psession, machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text entry.
	 */
	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    psession, machine, "_text");
	if (err < 0)
		err = event__synthesize_kernel_mmap(process_synthesized_event,
						    psession, machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
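/*
 * PERF_RECORD_FINISHED_ROUND marks the point at which every per-cpu
 * buffer has been flushed once; report-side code can safely re-sort
 * events by timestamp up to such a boundary.
 */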

static void mmap_read_all(void)
{
	int i;

	for (i = 0; i < nr_cpu; i++) {
		if (mmap_array[i].base)
			mmap_read(&mmap_array[i]);
	}

	if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
		write_output(&finished_round_event, sizeof(finished_round_event));
}

static int __cmd_record(int argc, const char **argv)
{
	int i;
	struct stat st;
	int flags;
	int err;
	unsigned long waking = 0;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = argc > 0;
	char buf;
	struct machine *machine;

	page_size = sysconf(_SC_PAGE_SIZE);

	atexit(sig_atexit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);

	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		exit(-1);
	}

	if (!strcmp(output_name, "-"))
		pipe_output = 1;
	else if (!stat(output_name, &st) && st.st_size) {
		if (write_mode == WRITE_FORCE) {
			char oldname[PATH_MAX];
			snprintf(oldname, sizeof(oldname), "%s.old",
				 output_name);
			unlink(oldname);
			rename(output_name, oldname);
		}
	} else if (write_mode == WRITE_APPEND) {
		write_mode = WRITE_FORCE;
	}

	flags = O_CREAT|O_RDWR;
	if (write_mode == WRITE_APPEND)
		file_new = 0;
	else
		flags |= O_TRUNC;

	if (pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		exit(-1);
	}

	session = perf_session__new(output_name, O_WRONLY,
				    write_mode == WRITE_FORCE, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	if (!no_buildid)
		perf_header__set_feat(&session->header, HEADER_BUILD_ID);

	if (!file_new) {
		err = perf_header__read(session, output);
		if (err < 0)
			goto out_delete_session;
	}

	if (have_tracepoints(&evsel_list))
		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);

	/*
	 * perf_session__delete(session) will be called at atexit_header()
	 */
	atexit(atexit_header);

	if (forks) {
		child_pid = fork();
		if (child_pid < 0) {
			perror("failed to fork");
			exit(-1);
		}

		if (!child_pid) {
			if (pipe_output)
				dup2(2, 1);
			close(child_ready_pipe[0]);
			close(go_pipe[1]);
			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

			/*
			 * Do a dummy execvp to get the PLT entry resolved,
			 * so we avoid the resolver overhead on the real
			 * execvp call.
			 */
			execvp("", (char **)argv);

			/*
			 * Tell the parent we're ready to go
			 */
			close(child_ready_pipe[1]);

			/*
			 * Wait until the parent tells us to go.
			 */
			if (read(go_pipe[0], &buf, 1) == -1)
				perror("unable to read pipe");

			execvp(argv[0], (char **)argv);

			perror(argv[0]);
			kill(getppid(), SIGUSR1);
			exit(-1);
		}

		if (!system_wide && target_tid == -1 && target_pid == -1)
			all_tids[0] = child_pid;

		close(child_ready_pipe[1]);
		close(go_pipe[0]);
		/*
		 * wait for child to settle
		 */
		if (read(child_ready_pipe[0], &buf, 1) == -1) {
			perror("unable to read pipe");
			exit(-1);
		}
		close(child_ready_pipe[0]);
	}
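	/*
	 * The two pipes above form a handshake: the child signals
	 * readiness by closing child_ready_pipe[1] (the parent's read
	 * returns EOF), then blocks reading go_pipe, whose write end
	 * the parent only closes further down, after all counters are
	 * opened - combined with enable_on_exec, counting effectively
	 * starts at the real execvp().
	 */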

	if (!system_wide && no_inherit && !cpu_list) {
		open_counters(-1);
	} else {
		for (i = 0; i < cpus->nr; i++)
			open_counters(cpus->map[i]);
	}

	perf_session__set_sample_type(session, sample_type);

	if (pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			return err;
	} else if (file_new) {
		err = perf_header__write(&session->header, output, false);
		if (err < 0)
			return err;
	}

	post_processing_offset = lseek(output, 0, SEEK_CUR);

	perf_session__set_sample_id_all(session, sample_id_all_avail);

	if (pipe_output) {
		err = event__synthesize_attrs(&session->header,
					      process_synthesized_event,
					      session);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}

		err = event__synthesize_event_types(process_synthesized_event,
						    session);
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			return err;
		}

		if (have_tracepoints(&evsel_list)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = event__synthesize_tracing_data(output, &evsel_list,
							     process_synthesized_event,
							     session);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				return err;
			}
			advance_output(err);
		}
	}

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("Couldn't find native kernel information.\n");
		return -1;
	}

	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    session, machine, "_text");
	if (err < 0)
		err = event__synthesize_kernel_mmap(process_synthesized_event,
						    session, machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = event__synthesize_modules(process_synthesized_event,
					session, machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest)
		perf_session__process_machines(session, event__synthesize_guest_os);

	if (!system_wide)
		event__synthesize_thread(target_tid, process_synthesized_event,
					 session);
	else
		event__synthesize_threads(process_synthesized_event, session);

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	/*
	 * Let the child rip
	 */
	if (forks)
		close(go_pipe[1]);

	for (;;) {
		int hits = samples;
		int thread;

		mmap_read_all();

		if (hits == samples) {
			if (done)
				break;
			err = poll(event_array, nr_poll, -1);
			waking++;
		}

		if (done) {
			for (i = 0; i < nr_cpu; i++) {
				struct perf_evsel *pos;

				list_for_each_entry(pos, &evsel_list, node) {
					for (thread = 0;
					     thread < thread_num;
					     thread++)
						ioctl(FD(pos, i, thread),
						      PERF_EVENT_IOC_DISABLE);
				}
			}
		}
	}
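	/*
	 * Loop logic: each pass drains all ring buffers; if nothing new
	 * arrived we block in poll() until a buffer fills (or a signal
	 * sets 'done'). Once 'done' is set the counters are disabled,
	 * and the next pass flushes whatever the kernel wrote in the
	 * meantime before 'hits == samples' finally breaks the loop.
	 */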

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
		(double)bytes_written / 1024.0 / 1024.0,
		output_name,
		bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

static bool force, append_file;

const struct option record_options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_CALLBACK(0, "filter", NULL, "filter",
		     "event filter", parse_filter),
	OPT_INTEGER('p', "pid", &target_pid,
		    "record events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('R', "raw-samples", &raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &append_file,
		    "append to the output file to do incremental profiling"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN('f', "force", &force,
		    "overwrite existing data file (deprecated)"),
	OPT_U64('c', "count", &user_interval, "event period to sample"),
	OPT_STRING('o', "output", &output_name, "file",
		   "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_BOOLEAN('g', "call-graph", &call_graph,
		    "do call-graph (stack chain/backtrace) recording"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &sample_time, "Sample timestamps"),
	OPT_BOOLEAN('n', "no-samples", &no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_END()
};

int cmd_record(int argc, const char **argv, const char *prefix __used)
{
	int err = -ENOMEM;
	struct perf_evsel *pos;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target_pid == -1 && target_tid == -1 &&
	    !system_wide && !cpu_list)
		usage_with_options(record_usage, record_options);

	if (force && append_file) {
		fprintf(stderr, "Can't overwrite and append at the same time."
			" You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (append_file) {
		write_mode = WRITE_APPEND;
	} else {
		write_mode = WRITE_FORCE;
	}

	symbol__init();

	if (no_buildid_cache || no_buildid)
		disable_buildid_cache();

	if (list_empty(&evsel_list) && perf_evsel_list__create_default() < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
				target_pid);
			usage_with_options(record_usage, record_options);
		}
	} else {
		all_tids = malloc(sizeof(pid_t));
		if (!all_tids)
			goto out_symbol_exit;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	cpus = cpu_map__new(cpu_list);
	if (cpus == NULL) {
		perror("failed to parse CPUs map");
		return -1;
	}

	list_for_each_entry(pos, &evsel_list, node) {
		if (perf_evsel__alloc_fd(pos, cpus->nr, thread_num) < 0)
			goto out_free_fd;
	}
	event_array = malloc(
		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
	if (!event_array)
		goto out_free_fd;

	if (user_interval != ULLONG_MAX)
		default_interval = user_interval;
	if (user_freq != UINT_MAX)
		freq = user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_event_array;
	}
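	/*
	 * Net effect: an explicit -c count forces freq to 0, so
	 * create_counter() programs a fixed attr->sample_period; with
	 * -F (or the 1000 Hz default) freq stays non-zero and
	 * create_counter() sets attr->freq = 1 and attr->sample_freq
	 * instead, default_interval serving only as a fallback value.
	 */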

	err = __cmd_record(argc, argv);

out_free_event_array:
	free(event_array);
out_free_fd:
	free(all_tids);
	all_tids = NULL;
out_symbol_exit:
	symbol__exit();
	return err;
}