perf evlist: Move destruction of maps to evlist destructor
[deliverable/linux.git] tools/perf/util/evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, NULL, NULL);

        return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
        struct perf_evlist *evlist = perf_evlist__new();

        if (evlist && perf_evlist__add_default(evlist)) {
                perf_evlist__delete(evlist);
                evlist = NULL;
        }

        return evlist;
}
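
/*
 * Minimal usage sketch for the constructor above (illustrative only, not
 * part of the original file; error handling elided). The default evlist
 * carries a single "cycles" event added by perf_evlist__add_default():
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	...
 *	perf_evlist__delete(evlist);
 */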

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);

        evlist->id_pos = first->id_pos;
        evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node)
                perf_evsel__calc_id_pos(evsel);

        perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        zfree(&evlist->mmap);
        zfree(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus = NULL;
        evlist->threads = NULL;
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        entry->idx = evlist->nr_entries;

        if (!evlist->nr_entries++)
                perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        bool set_id_pos = !evlist->nr_entries;

        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
        if (set_id_pos)
                perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
        struct perf_evsel *evsel, *leader;

        leader = list_entry(list->next, struct perf_evsel, node);
        evsel = list_entry(list->prev, struct perf_evsel, node);

        leader->nr_members = evsel->idx - leader->idx + 1;

        list_for_each_entry(evsel, list, node) {
                evsel->leader = leader;
        }
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries) {
                evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries);
        }
}
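
/*
 * Example of the leader bookkeeping above (an illustrative sketch, not from
 * the original file): for a list of three evsels with idx 0, 1 and 2,
 * __perf_evlist__set_leader() takes idx 0 as the leader, sets
 * leader->nr_members = 2 - 0 + 1 = 3, and points every member's ->leader at
 * it; perf_evlist__set_leader() records a single group in evlist->nr_groups
 * only when more than one event is present.
 */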

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
                                  struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
                                     const char *name)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
                    (strcmp(evsel->name, name) == 0))
                        return evsel;
        }

        return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler)
{
        struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

        if (evsel == NULL)
                return -1;

        evsel->handler = handler;
        perf_evlist__add(evlist, evsel);
        return 0;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return 0;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_DISABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
                              struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return -EINVAL;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_ENABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);
        int nfds = nr_cpus * nr_threads * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */
        u64 id;
        int ret;

        ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
        if (!ret)
                goto add;

        if (errno != ENOTTY)
                return -1;

        /* Legacy way to get event id.. All hail to old kernels! */

        /*
         * This way does not work with group format read, so bail
         * out in that case.
         */
        if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
                return -1;

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        id = read_data[id_idx];

add:
        perf_evlist__id_add(evlist, evsel, cpu, thread, id);
        return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node)
                if (sid->id == id)
                        return sid;

        return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct perf_sample_id *sid;

        if (evlist->nr_entries == 1)
                return perf_evlist__first(evlist);

        sid = perf_evlist__id2sid(evlist, id);
        if (sid)
                return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return perf_evlist__first(evlist);

        return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
                                 union perf_event *event, u64 *id)
{
        const u64 *array = event->sample.array;
        ssize_t n;

        n = (event->header.size - sizeof(event->header)) >> 3;

        if (event->header.type == PERF_RECORD_SAMPLE) {
                if (evlist->id_pos >= n)
                        return -1;
                *id = array[evlist->id_pos];
        } else {
                if (evlist->is_pos > n)
                        return -1;
                n -= evlist->is_pos;
                *id = array[n];
        }
        return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                                   union perf_event *event)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;
        u64 id;

        if (evlist->nr_entries == 1)
                return first;

        if (!first->attr.sample_id_all &&
            event->header.type != PERF_RECORD_SAMPLE)
                return first;

        if (perf_evlist__event2id(evlist, event, &id))
                return NULL;

        /* Synthesized events have an id of zero */
        if (!id)
                return first;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node) {
                if (sid->id == id)
                        return sid->evsel;
        }
        return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = md->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = (union perf_event *) md->event_copy;
                }

                old += size;
        }

        md->prev = old;

        return event;
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
        if (!evlist->overwrite) {
                struct perf_mmap *md = &evlist->mmap[idx];
                unsigned int old = md->prev;

                perf_mmap__write_tail(md, old);
        }
}
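
/*
 * Consumer-loop sketch for the two helpers above (illustrative only; the
 * poll()/pollfd plumbing and error handling are elided). Each mapped ring
 * buffer is drained and every event is acknowledged so the kernel may reuse
 * the space:
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		union perf_event *event;
 *
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *			deliver(event); // e.g. via perf_evlist__parse_sample()
 *			perf_evlist__mmap_consume(evlist, i);
 *		}
 *	}
 */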

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
        if (evlist->mmap[idx].base != NULL) {
                munmap(evlist->mmap[idx].base, evlist->mmap_len);
                evlist->mmap[idx].base = NULL;
        }
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++)
                __perf_evlist__munmap(evlist, i);

        zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__empty(evlist->cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->threads);
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                pr_debug2("failed to mmap perf event ring buffer, error %d\n",
                          errno);
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                                       int prot, int mask, int cpu, int thread,
                                       int *output)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                int fd = FD(evsel, cpu, thread);

                if (*output == -1) {
                        *output = fd;
                        if (__perf_evlist__mmap(evlist, idx, prot, mask,
                                                *output) < 0)
                                return -1;
                } else {
                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
                                return -1;
                }

                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                        return -1;
        }

        return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
                                     int mask)
{
        int cpu, thread;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per cpu\n");
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;

                for (thread = 0; thread < nr_threads; thread++) {
                        if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
                                                        cpu, thread, &output))
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < nr_cpus; cpu++)
                __perf_evlist__munmap(evlist, cpu);
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
                                        int mask)
{
        int thread;
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per thread\n");
        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;

                if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
                                                thread, &output))
                        goto out_unmap;
        }

        return 0;

out_unmap:
        for (thread = 0; thread < nr_threads; thread++)
                __perf_evlist__munmap(evlist, thread);
        return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return 0;

        return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
                            unsigned long max)
{
        unsigned long pages, val;
        static struct parse_tag tags[] = {
                { .tag = 'B', .mult = 1 },
                { .tag = 'K', .mult = 1 << 10 },
                { .tag = 'M', .mult = 1 << 20 },
                { .tag = 'G', .mult = 1 << 30 },
                { .tag = 0 },
        };

        if (str == NULL)
                return -EINVAL;

        val = parse_tag_value(str, tags);
        if (val != (unsigned long) -1) {
                /* we got file size value */
                pages = PERF_ALIGN(val, page_size) / page_size;
        } else {
                /* we got pages count value */
                char *eptr;
                pages = strtoul(str, &eptr, 10);
                if (*eptr != '\0')
                        return -EINVAL;
        }

        if (pages == 0 && min == 0) {
                /* leave number of pages at 0 */
        } else if (!is_power_of_2(pages)) {
                /* round pages up to next power of 2 */
                pages = next_pow2_l(pages);
                if (!pages)
                        return -EINVAL;
                pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
                        pages * page_size, pages);
        }

        if (pages > max)
                return -EINVAL;

        return pages;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
                                  int unset __maybe_unused)
{
        unsigned int *mmap_pages = opt->value;
        unsigned long max = UINT_MAX;
        long pages;

        if (max > SIZE_MAX / page_size)
                max = SIZE_MAX / page_size;

        pages = parse_pages_arg(str, 1, max);
        if (pages < 0) {
                pr_err("Invalid argument for --mmap_pages/-m\n");
                return -1;
        }

        *mmap_pages = pages;
        return 0;
}
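
/*
 * Worked example for the sizing helpers above (assuming a 4 KiB page_size):
 * "-m 512K" parses to 524288 bytes = 128 pages, already a power of two,
 * while a bare "-m 100" is rounded up to 128 pages by next_pow2_l().
 * perf_evlist__mmap_size(128) then maps (128 + 1) * 4096 = 528384 bytes,
 * the extra page being the ring-buffer control/header page.
 */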

/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = perf_evlist__mmap_size(pages);
        pr_debug("mmap size %zuB\n", evlist->mmap_len);
        mask = evlist->mmap_len - page_size - 1;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }

        if (cpu_map__empty(cpus))
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
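
/*
 * Putting the pieces together (a sketch; error handling omitted and a
 * populated 'struct target' assumed). Counters are created against the
 * cpu/thread maps, opened, and only then mmapped; UINT_MAX selects the
 * default 512 KiB ring buffer in perf_evlist__mmap_size():
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__enable(evlist);
 */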

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (target__uses_dummy_map(target))
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->filter == NULL)
                        continue;

                err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
                if (err)
                        break;
        }

        return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
                if (err)
                        break;
        }

        return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *pos;

        if (evlist->nr_entries == 1)
                return true;

        if (evlist->id_pos < 0 || evlist->is_pos < 0)
                return false;

        list_for_each_entry(pos, &evlist->entries, node) {
                if (pos->id_pos != evlist->id_pos ||
                    pos->is_pos != evlist->is_pos)
                        return false;
        }

        return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        if (evlist->combined_sample_type)
                return evlist->combined_sample_type;

        list_for_each_entry(evsel, &evlist->entries, node)
                evlist->combined_sample_type |= evsel->attr.sample_type;

        return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        evlist->combined_sample_type = 0;
        return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
        u64 read_format = first->attr.read_format;
        u64 sample_type = first->attr.sample_type;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (read_format != pos->attr.read_format)
                        return false;
        }

        /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
        if ((sample_type & PERF_SAMPLE_READ) &&
            !(read_format & PERF_FORMAT_ID)) {
                return false;
        }

        return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;

        if (sample_type & PERF_SAMPLE_IDENTIFIER)
                size += sizeof(data->id);
out:
        return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int ncpus = cpu_map__nr(evlist->cpus);
        int nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err;

        perf_evlist__update_id_pos(evlist);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        perf_evlist__close(evlist);
        errno = -err;
        return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
                                  const char *argv[], bool pipe_output,
                                  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
        int child_ready_pipe[2], go_pipe[2];
        char bf;

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");
                return -1;
        }

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;
        }

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;
        }

        if (!evlist->workload.pid) {
                if (pipe_output)
                        dup2(2, 1);

                signal(SIGTERM, SIG_DFL);

                close(child_ready_pipe[0]);
                close(go_pipe[1]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                if (exec_error) {
                        union sigval val;

                        val.sival_int = errno;
                        if (sigqueue(getppid(), SIGUSR1, val))
                                perror(argv[0]);
                } else
                        perror(argv[0]);
                exit(-1);
        }

        if (exec_error) {
                struct sigaction act = {
                        .sa_flags     = SA_SIGINFO,
                        .sa_sigaction = exec_error,
                };
                sigaction(SIGUSR1, &act, NULL);
        }

        if (target__none(target))
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        close(go_pipe[0]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;
        }

        fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);
        return 0;

out_close_pipes:
        close(go_pipe[0]);
        close(go_pipe[1]);
out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);
        return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
        if (evlist->workload.cork_fd > 0) {
                char bf = 0;
                int ret;
                /*
                 * Remove the cork, let it rip!
                 */
                ret = write(evlist->workload.cork_fd, &bf, 1);
                if (ret < 0)
                        perror("unable to write to pipe");

                close(evlist->workload.cork_fd);
                return ret;
        }

        return 0;
}
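
/*
 * Workload-driving sketch (illustrative; error handling and the SIGUSR1
 * exec_error callback omitted). The forked child stays corked on go_pipe
 * until perf_evlist__start_workload() writes the release byte, so the
 * counters can be opened, mmapped and (optionally) enabled before the
 * traced program execs:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */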

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample)
{
        struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

        if (!evsel)
                return -EFAULT;
        return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *evsel;
        size_t printed = 0;

        list_for_each_entry(evsel, &evlist->entries, node) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }

        return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
                             int err, char *buf, size_t size)
{
        char sbuf[128];

        switch (err) {
        case ENOENT:
                scnprintf(buf, size, "%s",
                          "Error:\tUnable to find debugfs\n"
                          "Hint:\tWas your kernel compiled with debugfs support?\n"
                          "Hint:\tIs the debugfs filesystem mounted?\n"
                          "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
                break;
        case EACCES:
                scnprintf(buf, size,
                          "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
                          "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
                          debugfs_mountpoint, debugfs_mountpoint);
                break;
        default:
                scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
                break;
        }

        return 0;
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
                               int err, char *buf, size_t size)
{
        int printed, value;
        char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

        switch (err) {
        case EACCES:
        case EPERM:
                printed = scnprintf(buf, size,
                                    "Error:\t%s.\n"
                                    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

                value = perf_event_paranoid();

                printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

                if (value >= 2) {
                        printed += scnprintf(buf + printed, size - printed,
                                             "For your workloads it needs to be <= 1\nHint:\t");
                }
                printed += scnprintf(buf + printed, size - printed,
                                     "For system wide tracing it needs to be set to -1");

                printed += scnprintf(buf + printed, size - printed,
                                     ".\nHint:\tThe current value is %d.", value);
                break;
        default:
                scnprintf(buf, size, "%s", emsg);
                break;
        }

        return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
                           struct perf_evsel *move_evsel)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(move);

        if (move_evsel == perf_evlist__first(evlist))
                return;

        list_for_each_entry_safe(evsel, n, &evlist->entries, node) {
                if (evsel->leader == move_evsel->leader)
                        list_move_tail(&evsel->node, &move);
        }

        list_splice(&move, &evlist->entries);
}