/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "parse-events.h"
#include "parse-options.h"
#include <sys/mman.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
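
/*
 * Example (illustrative, not part of the original source): FD() resolves the
 * perf_event_open() file descriptor that an evsel holds for a given
 * (cpu index, thread index) cell of its fd xyarray, e.g.
 *
 *	int fd = FD(evsel, 0, 0);
 *
 * names the descriptor for the first mapped cpu and first mapped thread.
 */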

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}
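
/*
 * Worked example (illustrative): for a list built from the group spec
 * "{cycles,instructions}", the first entry (cycles, idx 0) becomes the
 * leader of both events and leader->nr_members is 1 - 0 + 1 = 2.
 */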

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (evsel == NULL)
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
{
	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].idx = idx;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, -1);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
{
	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);

	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered);
}
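
/*
 * Example (illustrative sketch, modelled on how record-style tools drive
 * these helpers): poll the evlist and prune fds whose ring buffers went
 * away; when no pollable fd remains, stop:
 *
 *	for (;;) {
 *		if (perf_evlist__poll(evlist, -1) < 0)
 *			break;
 *		if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
 *			break;
 *	}
 */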

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}
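
/*
 * Example (illustrative sketch): draining one ring buffer; every event
 * handed out by perf_evlist__mmap_read() is released again with
 * perf_evlist__mmap_consume() so the kernel can reuse the space.
 * deliver() and sample stand in for tool-specific handling:
 *
 *	struct perf_sample sample;
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *			deliver(&sample);
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */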

static bool perf_mmap__empty(struct perf_mmap *md)
{
	/* Empty when the reader has caught up with the writer. */
	return perf_mmap__read_head(md) == md->prev;
}

static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
	++evlist->mmap[idx].refcnt;
}

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
	BUG_ON(evlist->mmap[idx].refcnt == 0);

	if (--evlist->mmap[idx].refcnt == 0)
		__perf_evlist__munmap(evlist, idx);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	if (!evlist->overwrite) {
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (md->refcnt == 1 && perf_mmap__empty(md))
		perf_evlist__mmap_put(evlist, idx);
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
		evlist->mmap[idx].refcnt = 0;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

struct mmap_params {
	int prot;
	int mask;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	evlist->mmap[idx].refcnt = 2;
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_evlist__mmap_get(evlist, idx);
		}

		if (__perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
			return -1;
		}

		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
			return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
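
/*
 * Worked example (illustrative): with 4096-byte pages and the UINT_MAX
 * default, pages becomes (512 * 1024) / 4096 = 128, so the mapping is
 * (128 + 1) * 4096 = 528384 bytes: 128 data pages plus one extra page
 * for the perf_event_mmap_page control header.
 */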

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = next_pow2_l(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
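
/*
 * Worked example (illustrative): "512K" parses through the tag table to
 * 524288 bytes, i.e. 128 pages with 4096-byte pages; a bare "100" is taken
 * as a page count and rounded up to the next power of two, 128.
 */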

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	evlist->threads = NULL;
	return -1;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
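
/*
 * Example (illustrative sketch): the cork pattern as a caller would use it;
 * the workload is forked and parked on the "go" pipe first, events are set
 * up against its pid, and only then is the cork popped.  target and argv
 * stand in for the tool's parsed command line:
 *
 *	if (perf_evlist__prepare_workload(evlist, target, argv,
 *					  false, NULL) < 0)
 *		return -1;
 *	if (perf_evlist__open(evlist) < 0)
 *		return -1;
 *	perf_evlist__start_workload(evlist);
 */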

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
			     int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case ENOENT:
		scnprintf(buf, size, "%s",
			  "Error:\tUnable to find debugfs\n"
			  "Hint:\tWas your kernel compiled with debugfs support?\n"
			  "Hint:\tIs the debugfs filesystem mounted?\n"
			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
		break;
	case EACCES:
		scnprintf(buf, size,
			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
			  debugfs_mountpoint, debugfs_mountpoint);
		break;
	default:
		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
		break;
	}

	return 0;
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}