tools/perf/util/evsel.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

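/* Lvalue access to the event fd stored in the evsel's (cpu, thread) xyarray slot. */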
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

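/* Initialize an already-allocated evsel with its attribute and list index. */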
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
}

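/* Allocate and initialize a new evsel; returns NULL when out of memory. */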
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

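/* Allocate the ncpus x nthreads fd array, marking every slot as unopened (-1). */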
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

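/*
 * Allocate the per-(cpu, thread) sample id bookkeeping: the sample_id
 * xyarray and the flat array of kernel-assigned event ids.
 */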
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

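/* Allocate the counts buffer: the aggregate header plus one slot per cpu. */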
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

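/* Close every fd that was opened and put the slots back to -1. */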
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

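/*
 * Release everything an evsel owns except the evsel itself; the caller must
 * have removed it from any evlist first. perf_evsel__delete() below also
 * drops the cgroup reference and frees the name and the structure.
 */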
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

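/*
 * Read one (cpu, thread) counter. When scale is set the fd's read format
 * carries value, time_enabled and time_running (three u64s), and the value
 * is extrapolated (val * ena / run) if the event was multiplexed off the
 * PMU for part of the time.
 */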
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

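/*
 * Read and sum the counter over all (cpu, thread) fds into counts->aggr.
 * counts->scaled ends up -1 if the event never ran, 1 if the aggregate
 * value had to be extrapolated, 0 otherwise.
 */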
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

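/*
 * Open one event fd per (cpu, thread). When group is set, the first fd
 * opened on each cpu becomes the group leader for the remaining threads.
 * With a cgroup, the pid argument carries the cgroup fd together with
 * PERF_FLAG_PID_CGROUP. On failure every fd opened so far is unwound.
 */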
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}

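/*
 * Placeholder single-entry maps holding -1 ("all threads" / "any cpu" in
 * sys_perf_event_open terms), used when a caller passes NULL maps or opens
 * per-cpu / per-thread only.
 */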
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr = 1,
	.threads = { -1, },
};

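/*
 * Open the event over explicit cpu and thread maps; NULL maps fall back to
 * the placeholders above. The per-cpu and per-thread variants below are
 * convenience wrappers.
 */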
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
}

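/*
 * Sketch of how a caller such as builtin-stat drives the API above, assuming
 * a pre-filled perf_event_attr and thread_map (both hypothetical here);
 * error handling and the evlist plumbing are elided:
 *
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel && perf_evsel__open_per_thread(evsel, threads, false) == 0) {
 *		(run the workload)
 *		__perf_evsel__read_on_cpu(evsel, 0, 0, false);
 *		printf("%" PRIu64 "\n", evsel->counts->cpu[0].val);
 *		perf_evsel__close_fd(evsel, 1, threads->nr);
 *	}
 *	if (evsel)
 *		perf_evsel__delete(evsel);
 */
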
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	/*
	 * The sample_id_all fields sit at the end of a non-sample record,
	 * so start at the last u64 and walk backwards, in the reverse of
	 * the order the kernel wrote them.
	 */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

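/* True if reading size bytes at offset would run past the end of the record. */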
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

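/*
 * Decode a raw record into a perf_sample. Non-sample records only get their
 * trailing sample_id_all block parsed (when present); for PERF_RECORD_SAMPLE
 * the fields selected by type are pulled out in ABI order, with each
 * variable-length payload bounds-checked against the record size.
 */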
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data)
{
	const u64 *array;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = *p;
		p++;

		if (sample_overlap(event, p, data->raw_size))
			return -EFAULT;

		data->raw_data = p;
	}

	return 0;
}