perf tools: Move event__parse_sample to evsel.c
tools/perf/util/evsel.c
#include "evsel.h"
#include "evlist.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#include <unistd.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

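/*
 * FD() yields the fd slot (an lvalue) for a given (cpu, thread) pair;
 * SID() yields a pointer to the matching struct perf_sample_id entry.
 */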
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->id, x, y)

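/*
 * Allocate a new evsel for the given attr, to sit at position @idx in
 * its evlist.
 */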
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		evsel->idx  = idx;
		evsel->attr = *attr;
		INIT_LIST_HEAD(&evsel->node);
	}

	return evsel;
}

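/*
 * The fd, id and counts arrays are sized by the cpu/thread maps the
 * evsel will be opened on; the callers further down allocate them
 * lazily, on first use.
 */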
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	return evsel->id != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

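/*
 * evlist mmap helpers: there is one ring buffer per monitored cpu, and
 * the array backing them is allocated and torn down as a unit.
 */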
void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus)
{
	evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->id);
	free(evsel);
}

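/*
 * Read the counter for one (cpu, thread). When the kernel had to
 * multiplex the event (run < ena), scale the raw value to an estimate
 * of what a full run would have counted:
 *
 *	val = val * ena / run	(rounded to nearest)
 */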
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

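/*
 * Like the above, but summed over all (cpu, thread) pairs the evsel is
 * open on. counts->scaled ends up -1 if the event never ran, 1 if the
 * aggregate value had to be extrapolated, 0 otherwise.
 */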
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

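/*
 * Open the event on every (cpu, thread) pair in the given maps. When
 * group is true the first counter opened on each cpu becomes the group
 * leader for the counters that follow it; on any failure, every fd
 * opened so far is closed again.
 */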
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group, bool inherit)
{
	int cpu, thread;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

		evsel->attr.inherit = (cpus->map[cpu] < 0) && inherit;

		for (thread = 0; thread < threads->nr; thread++) {
			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     threads->map[thread],
								     cpus->map[cpu],
								     group_fd, 0);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}

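/*
 * Dummy maps used when the caller passes NULL: a single -1 entry,
 * which sys_perf_event_open() interprets as "don't restrict by cpu"
 * or "don't restrict by pid", respectively.
 */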
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group, bool inherit)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, inherit);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}

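/*
 * Typical use of the open helpers above (a sketch; attr setup and
 * error handling elided):
 *
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0)
 *		goto out_error;
 *	...
 *	__perf_evsel__read_on_cpu(evsel, 0, 0, false);
 */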
static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
			       int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

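/*
 * Read the counter once to find its PERF_FORMAT_ID value (its position
 * in read_data depends on which other read_format bits are set), then
 * hash the id into evlist->heads so samples can later be mapped back
 * to the evsel that produced them.
 */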
static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
				int cpu, int thread, int fd)
{
	struct perf_sample_id *sid;
	u64 read_data[4] = { 0, };
	int hash, id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	sid = SID(evsel, cpu, thread);
	sid->id = read_data[id_idx];
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
	return 0;
}

/** perf_evlist__mmap - Create per-cpu maps to receive events
 *
 * @evlist - list of events
 * @cpus - cpu map being monitored
 * @threads - threads map being monitored
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 */
int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
		      struct thread_map *threads, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL &&
	    perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL &&
	    perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);

				if (evsel->idx || thread) {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

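/*
 * sample_id_all puts the id fields at the *end* of non-sample records,
 * so parse them backwards from the tail of the event.
 */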
static int event__parse_id_sample(const event_t *event, u64 type,
				  struct sample_data *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

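/*
 * Parse a PERF_RECORD_SAMPLE, whose fields appear in the order of the
 * PERF_SAMPLE_* bits set in @type. Any other record type only carries
 * the trailing sample_id_all block, handled above.
 */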
int event__parse_sample(const event_t *event, u64 type, bool sample_id_all,
			struct sample_data *data)
{
	const u64 *array;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}