perf machine: Move more machine methods to machine.c
[deliverable/linux.git] / tools / perf / util / evsel.c
CommitLineData
f8a95309
ACM
1/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
936be503 10#include <byteswap.h>
0f6a3015 11#include <linux/bitops.h>
936be503 12#include "asm/bug.h"
efd2b924 13#include "debugfs.h"
5555ded4 14#include "event-parse.h"
69aad6f1 15#include "evsel.h"
70082dd9 16#include "evlist.h"
69aad6f1 17#include "util.h"
86bd5e86 18#include "cpumap.h"
fd78260b 19#include "thread_map.h"
12864b31 20#include "target.h"
d2709c7c
DH
21#include <linux/hw_breakpoint.h>
22#include <linux/perf_event.h>
26d33022 23#include "perf_regs.h"
69aad6f1 24
c52b12ed
ACM
25#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
26
bde09467 27static int __perf_evsel__sample_size(u64 sample_type)
c2a70653
ACM
28{
29 u64 mask = sample_type & PERF_SAMPLE_MASK;
30 int size = 0;
31 int i;
32
33 for (i = 0; i < 64; i++) {
34 if (mask & (1ULL << i))
35 size++;
36 }
37
38 size *= sizeof(u64);
39
40 return size;
41}
42
4bf9ce1b 43void hists__init(struct hists *hists)
0e2a5f10
ACM
44{
45 memset(hists, 0, sizeof(*hists));
46 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
47 hists->entries_in = &hists->entries_in_array[0];
48 hists->entries_collapsed = RB_ROOT;
49 hists->entries = RB_ROOT;
50 pthread_mutex_init(&hists->lock, NULL);
51}
52
ef1d1af2
ACM
53void perf_evsel__init(struct perf_evsel *evsel,
54 struct perf_event_attr *attr, int idx)
55{
56 evsel->idx = idx;
57 evsel->attr = *attr;
2cfda562 58 evsel->leader = evsel;
ef1d1af2 59 INIT_LIST_HEAD(&evsel->node);
1980c2eb 60 hists__init(&evsel->hists);
bde09467 61 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
ef1d1af2
ACM
62}
63
23a2f3ab 64struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
69aad6f1
ACM
65{
66 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
67
ef1d1af2
ACM
68 if (evsel != NULL)
69 perf_evsel__init(evsel, attr, idx);
69aad6f1
ACM
70
71 return evsel;
72}
73
201b7334 74struct event_format *event_format__new(const char *sys, const char *name)
efd2b924
ACM
75{
76 int fd, n;
77 char *filename;
78 void *bf = NULL, *nbf;
79 size_t size = 0, alloc_size = 0;
80 struct event_format *format = NULL;
81
82 if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
83 goto out;
84
85 fd = open(filename, O_RDONLY);
86 if (fd < 0)
87 goto out_free_filename;
88
89 do {
90 if (size == alloc_size) {
91 alloc_size += BUFSIZ;
92 nbf = realloc(bf, alloc_size);
93 if (nbf == NULL)
94 goto out_free_bf;
95 bf = nbf;
96 }
97
98 n = read(fd, bf + size, BUFSIZ);
99 if (n < 0)
100 goto out_free_bf;
101 size += n;
102 } while (n > 0);
103
104 pevent_parse_format(&format, bf, size, sys);
105
106out_free_bf:
107 free(bf);
108 close(fd);
109out_free_filename:
110 free(filename);
111out:
112 return format;
113}
114
115struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
116{
117 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
118
119 if (evsel != NULL) {
120 struct perf_event_attr attr = {
0b80f8b3
ACM
121 .type = PERF_TYPE_TRACEPOINT,
122 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
123 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
efd2b924
ACM
124 };
125
e48ffe2b
ACM
126 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
127 goto out_free;
128
efd2b924
ACM
129 evsel->tp_format = event_format__new(sys, name);
130 if (evsel->tp_format == NULL)
131 goto out_free;
132
0b80f8b3 133 event_attr_init(&attr);
efd2b924 134 attr.config = evsel->tp_format->id;
0b80f8b3 135 attr.sample_period = 1;
efd2b924 136 perf_evsel__init(evsel, &attr, idx);
efd2b924
ACM
137 }
138
139 return evsel;
140
141out_free:
e48ffe2b 142 free(evsel->name);
efd2b924
ACM
143 free(evsel);
144 return NULL;
145}
146
8ad7013b 147const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
c410431c
ACM
148 "cycles",
149 "instructions",
150 "cache-references",
151 "cache-misses",
152 "branches",
153 "branch-misses",
154 "bus-cycles",
155 "stalled-cycles-frontend",
156 "stalled-cycles-backend",
157 "ref-cycles",
158};
159
dd4f5223 160static const char *__perf_evsel__hw_name(u64 config)
c410431c
ACM
161{
162 if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
163 return perf_evsel__hw_names[config];
164
165 return "unknown-hardware";
166}
167
27f18617 168static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
c410431c 169{
27f18617 170 int colon = 0, r = 0;
c410431c 171 struct perf_event_attr *attr = &evsel->attr;
c410431c
ACM
172 bool exclude_guest_default = false;
173
174#define MOD_PRINT(context, mod) do { \
175 if (!attr->exclude_##context) { \
27f18617 176 if (!colon) colon = ++r; \
c410431c
ACM
177 r += scnprintf(bf + r, size - r, "%c", mod); \
178 } } while(0)
179
180 if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
181 MOD_PRINT(kernel, 'k');
182 MOD_PRINT(user, 'u');
183 MOD_PRINT(hv, 'h');
184 exclude_guest_default = true;
185 }
186
187 if (attr->precise_ip) {
188 if (!colon)
27f18617 189 colon = ++r;
c410431c
ACM
190 r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
191 exclude_guest_default = true;
192 }
193
194 if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
195 MOD_PRINT(host, 'H');
196 MOD_PRINT(guest, 'G');
197 }
198#undef MOD_PRINT
199 if (colon)
27f18617 200 bf[colon - 1] = ':';
c410431c
ACM
201 return r;
202}
203
27f18617
ACM
204static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
205{
206 int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
207 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
208}
209
8ad7013b 210const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
335c2f5d
ACM
211 "cpu-clock",
212 "task-clock",
213 "page-faults",
214 "context-switches",
8ad7013b 215 "cpu-migrations",
335c2f5d
ACM
216 "minor-faults",
217 "major-faults",
218 "alignment-faults",
219 "emulation-faults",
220};
221
dd4f5223 222static const char *__perf_evsel__sw_name(u64 config)
335c2f5d
ACM
223{
224 if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
225 return perf_evsel__sw_names[config];
226 return "unknown-software";
227}
228
229static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
230{
231 int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
232 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
233}
234
287e74aa
JO
235static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
236{
237 int r;
238
239 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
240
241 if (type & HW_BREAKPOINT_R)
242 r += scnprintf(bf + r, size - r, "r");
243
244 if (type & HW_BREAKPOINT_W)
245 r += scnprintf(bf + r, size - r, "w");
246
247 if (type & HW_BREAKPOINT_X)
248 r += scnprintf(bf + r, size - r, "x");
249
250 return r;
251}
252
253static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
254{
255 struct perf_event_attr *attr = &evsel->attr;
256 int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
257 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
258}
259
0b668bc9
ACM
260const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
261 [PERF_EVSEL__MAX_ALIASES] = {
262 { "L1-dcache", "l1-d", "l1d", "L1-data", },
263 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
264 { "LLC", "L2", },
265 { "dTLB", "d-tlb", "Data-TLB", },
266 { "iTLB", "i-tlb", "Instruction-TLB", },
267 { "branch", "branches", "bpu", "btb", "bpc", },
268 { "node", },
269};
270
271const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
272 [PERF_EVSEL__MAX_ALIASES] = {
273 { "load", "loads", "read", },
274 { "store", "stores", "write", },
275 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
276};
277
278const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
279 [PERF_EVSEL__MAX_ALIASES] = {
280 { "refs", "Reference", "ops", "access", },
281 { "misses", "miss", },
282};
283
284#define C(x) PERF_COUNT_HW_CACHE_##x
285#define CACHE_READ (1 << C(OP_READ))
286#define CACHE_WRITE (1 << C(OP_WRITE))
287#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
288#define COP(x) (1 << x)
289
290/*
291 * cache operartion stat
292 * L1I : Read and prefetch only
293 * ITLB and BPU : Read-only
294 */
295static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
296 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
297 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
298 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
299 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
300 [C(ITLB)] = (CACHE_READ),
301 [C(BPU)] = (CACHE_READ),
302 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
303};
304
305bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
306{
307 if (perf_evsel__hw_cache_stat[type] & COP(op))
308 return true; /* valid */
309 else
310 return false; /* invalid */
311}
312
313int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
314 char *bf, size_t size)
315{
316 if (result) {
317 return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
318 perf_evsel__hw_cache_op[op][0],
319 perf_evsel__hw_cache_result[result][0]);
320 }
321
322 return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
323 perf_evsel__hw_cache_op[op][1]);
324}
325
dd4f5223 326static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
0b668bc9
ACM
327{
328 u8 op, result, type = (config >> 0) & 0xff;
329 const char *err = "unknown-ext-hardware-cache-type";
330
331 if (type > PERF_COUNT_HW_CACHE_MAX)
332 goto out_err;
333
334 op = (config >> 8) & 0xff;
335 err = "unknown-ext-hardware-cache-op";
336 if (op > PERF_COUNT_HW_CACHE_OP_MAX)
337 goto out_err;
338
339 result = (config >> 16) & 0xff;
340 err = "unknown-ext-hardware-cache-result";
341 if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
342 goto out_err;
343
344 err = "invalid-cache";
345 if (!perf_evsel__is_cache_op_valid(type, op))
346 goto out_err;
347
348 return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
349out_err:
350 return scnprintf(bf, size, "%s", err);
351}
352
353static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
354{
355 int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
356 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
357}
358
6eef3d9c
ACM
359static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
360{
361 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
362 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
363}
364
7289f83c 365const char *perf_evsel__name(struct perf_evsel *evsel)
a4460836 366{
7289f83c 367 char bf[128];
a4460836 368
7289f83c
ACM
369 if (evsel->name)
370 return evsel->name;
c410431c
ACM
371
372 switch (evsel->attr.type) {
373 case PERF_TYPE_RAW:
6eef3d9c 374 perf_evsel__raw_name(evsel, bf, sizeof(bf));
c410431c
ACM
375 break;
376
377 case PERF_TYPE_HARDWARE:
7289f83c 378 perf_evsel__hw_name(evsel, bf, sizeof(bf));
c410431c 379 break;
0b668bc9
ACM
380
381 case PERF_TYPE_HW_CACHE:
7289f83c 382 perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
0b668bc9
ACM
383 break;
384
335c2f5d 385 case PERF_TYPE_SOFTWARE:
7289f83c 386 perf_evsel__sw_name(evsel, bf, sizeof(bf));
335c2f5d
ACM
387 break;
388
a4460836 389 case PERF_TYPE_TRACEPOINT:
7289f83c 390 scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
a4460836
ACM
391 break;
392
287e74aa
JO
393 case PERF_TYPE_BREAKPOINT:
394 perf_evsel__bp_name(evsel, bf, sizeof(bf));
395 break;
396
c410431c 397 default:
ca1b1457
RR
398 scnprintf(bf, sizeof(bf), "unknown attr type: %d",
399 evsel->attr.type);
a4460836 400 break;
c410431c
ACM
401 }
402
7289f83c
ACM
403 evsel->name = strdup(bf);
404
405 return evsel->name ?: "unknown";
c410431c
ACM
406}
407
/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *    Group members are ruled by group leaders. They need to
 *    be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *    Independent events and group leaders are initially disabled
 *    and get enabled by exec. Group members are ruled by group
 *    leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *    When attaching events to already running traced we
 *    enable/disable events specifically, as there's no
 *    initial traced exec call.
 */
cac21425
JO
436void perf_evsel__config(struct perf_evsel *evsel,
437 struct perf_record_opts *opts)
0f82ebc4
ACM
438{
439 struct perf_event_attr *attr = &evsel->attr;
440 int track = !evsel->idx; /* only the first counter needs these */
441
808e1226 442 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
0f82ebc4
ACM
443 attr->inherit = !opts->no_inherit;
444 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
445 PERF_FORMAT_TOTAL_TIME_RUNNING |
446 PERF_FORMAT_ID;
447
448 attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
449
450 /*
451 * We default some events to a 1 default interval. But keep
452 * it a weak assumption overridable by the user.
453 */
454 if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
455 opts->user_interval != ULLONG_MAX)) {
456 if (opts->freq) {
457 attr->sample_type |= PERF_SAMPLE_PERIOD;
458 attr->freq = 1;
459 attr->sample_freq = opts->freq;
460 } else {
461 attr->sample_period = opts->default_interval;
462 }
463 }
464
465 if (opts->no_samples)
466 attr->sample_freq = 0;
467
468 if (opts->inherit_stat)
469 attr->inherit_stat = 1;
470
471 if (opts->sample_address) {
472 attr->sample_type |= PERF_SAMPLE_ADDR;
473 attr->mmap_data = track;
474 }
475
26d33022 476 if (opts->call_graph) {
0f82ebc4
ACM
477 attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
478
26d33022
JO
479 if (opts->call_graph == CALLCHAIN_DWARF) {
480 attr->sample_type |= PERF_SAMPLE_REGS_USER |
481 PERF_SAMPLE_STACK_USER;
482 attr->sample_regs_user = PERF_REGS_MASK;
483 attr->sample_stack_user = opts->stack_dump_size;
484 attr->exclude_callchain_user = 1;
485 }
486 }
487
e40ee742 488 if (perf_target__has_cpu(&opts->target))
0f82ebc4
ACM
489 attr->sample_type |= PERF_SAMPLE_CPU;
490
3e76ac78
AV
491 if (opts->period)
492 attr->sample_type |= PERF_SAMPLE_PERIOD;
493
808e1226 494 if (!opts->sample_id_all_missing &&
d67356e7 495 (opts->sample_time || !opts->no_inherit ||
aa22dd49 496 perf_target__has_cpu(&opts->target)))
0f82ebc4
ACM
497 attr->sample_type |= PERF_SAMPLE_TIME;
498
499 if (opts->raw_samples) {
500 attr->sample_type |= PERF_SAMPLE_TIME;
501 attr->sample_type |= PERF_SAMPLE_RAW;
502 attr->sample_type |= PERF_SAMPLE_CPU;
503 }
504
505 if (opts->no_delay) {
506 attr->watermark = 0;
507 attr->wakeup_events = 1;
508 }
bdfebd84
RAV
509 if (opts->branch_stack) {
510 attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
511 attr->branch_sample_type = opts->branch_stack;
512 }
0f82ebc4
ACM
513
514 attr->mmap = track;
515 attr->comm = track;
516
774cb499
JO
517 /*
518 * XXX see the function comment above
519 *
520 * Disabling only independent events or group leaders,
521 * keeping group members enabled.
522 */
823254ed 523 if (perf_evsel__is_group_leader(evsel))
774cb499
JO
524 attr->disabled = 1;
525
526 /*
527 * Setting enable_on_exec for independent events and
528 * group leaders for traced executed by perf.
529 */
823254ed 530 if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
0f82ebc4 531 attr->enable_on_exec = 1;
0f82ebc4
ACM
532}
533
69aad6f1
ACM
534int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
535{
4af4c955 536 int cpu, thread;
69aad6f1 537 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
4af4c955
DA
538
539 if (evsel->fd) {
540 for (cpu = 0; cpu < ncpus; cpu++) {
541 for (thread = 0; thread < nthreads; thread++) {
542 FD(evsel, cpu, thread) = -1;
543 }
544 }
545 }
546
69aad6f1
ACM
547 return evsel->fd != NULL ? 0 : -ENOMEM;
548}
549
745cefc5
ACM
550int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
551 const char *filter)
552{
553 int cpu, thread;
554
555 for (cpu = 0; cpu < ncpus; cpu++) {
556 for (thread = 0; thread < nthreads; thread++) {
557 int fd = FD(evsel, cpu, thread),
558 err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
559
560 if (err)
561 return err;
562 }
563 }
564
565 return 0;
566}
567
70db7533
ACM
568int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
569{
a91e5431
ACM
570 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
571 if (evsel->sample_id == NULL)
572 return -ENOMEM;
573
574 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
575 if (evsel->id == NULL) {
576 xyarray__delete(evsel->sample_id);
577 evsel->sample_id = NULL;
578 return -ENOMEM;
579 }
580
581 return 0;
70db7533
ACM
582}
583
c52b12ed
ACM
584int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
585{
586 evsel->counts = zalloc((sizeof(*evsel->counts) +
587 (ncpus * sizeof(struct perf_counts_values))));
588 return evsel->counts != NULL ? 0 : -ENOMEM;
589}
590
69aad6f1
ACM
591void perf_evsel__free_fd(struct perf_evsel *evsel)
592{
593 xyarray__delete(evsel->fd);
594 evsel->fd = NULL;
595}
596
70db7533
ACM
597void perf_evsel__free_id(struct perf_evsel *evsel)
598{
a91e5431
ACM
599 xyarray__delete(evsel->sample_id);
600 evsel->sample_id = NULL;
601 free(evsel->id);
70db7533
ACM
602 evsel->id = NULL;
603}
604
c52b12ed
ACM
605void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
606{
607 int cpu, thread;
608
609 for (cpu = 0; cpu < ncpus; cpu++)
610 for (thread = 0; thread < nthreads; ++thread) {
611 close(FD(evsel, cpu, thread));
612 FD(evsel, cpu, thread) = -1;
613 }
614}
615
ef1d1af2 616void perf_evsel__exit(struct perf_evsel *evsel)
69aad6f1
ACM
617{
618 assert(list_empty(&evsel->node));
619 xyarray__delete(evsel->fd);
a91e5431
ACM
620 xyarray__delete(evsel->sample_id);
621 free(evsel->id);
ef1d1af2
ACM
622}
623
624void perf_evsel__delete(struct perf_evsel *evsel)
625{
626 perf_evsel__exit(evsel);
023695d9 627 close_cgroup(evsel->cgrp);
6a4bb04c 628 free(evsel->group_name);
e48ffe2b 629 if (evsel->tp_format)
efd2b924 630 pevent_free_format(evsel->tp_format);
f0c55bcf 631 free(evsel->name);
69aad6f1
ACM
632 free(evsel);
633}
c52b12ed
ACM
634
635int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
636 int cpu, int thread, bool scale)
637{
638 struct perf_counts_values count;
639 size_t nv = scale ? 3 : 1;
640
641 if (FD(evsel, cpu, thread) < 0)
642 return -EINVAL;
643
4eed11d5
ACM
644 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
645 return -ENOMEM;
646
c52b12ed
ACM
647 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
648 return -errno;
649
650 if (scale) {
651 if (count.run == 0)
652 count.val = 0;
653 else if (count.run < count.ena)
654 count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
655 } else
656 count.ena = count.run = 0;
657
658 evsel->counts->cpu[cpu] = count;
659 return 0;
660}
661
662int __perf_evsel__read(struct perf_evsel *evsel,
663 int ncpus, int nthreads, bool scale)
664{
665 size_t nv = scale ? 3 : 1;
666 int cpu, thread;
667 struct perf_counts_values *aggr = &evsel->counts->aggr, count;
668
52bcd994 669 aggr->val = aggr->ena = aggr->run = 0;
c52b12ed
ACM
670
671 for (cpu = 0; cpu < ncpus; cpu++) {
672 for (thread = 0; thread < nthreads; thread++) {
673 if (FD(evsel, cpu, thread) < 0)
674 continue;
675
676 if (readn(FD(evsel, cpu, thread),
677 &count, nv * sizeof(u64)) < 0)
678 return -errno;
679
680 aggr->val += count.val;
681 if (scale) {
682 aggr->ena += count.ena;
683 aggr->run += count.run;
684 }
685 }
686 }
687
688 evsel->counts->scaled = 0;
689 if (scale) {
690 if (aggr->run == 0) {
691 evsel->counts->scaled = -1;
692 aggr->val = 0;
693 return 0;
694 }
695
696 if (aggr->run < aggr->ena) {
697 evsel->counts->scaled = 1;
698 aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
699 }
700 } else
701 aggr->ena = aggr->run = 0;
702
703 return 0;
704}
48290609 705
6a4bb04c
JO
706static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
707{
708 struct perf_evsel *leader = evsel->leader;
709 int fd;
710
823254ed 711 if (perf_evsel__is_group_leader(evsel))
6a4bb04c
JO
712 return -1;
713
714 /*
715 * Leader must be already processed/open,
716 * if not it's a bug.
717 */
718 BUG_ON(!leader->fd);
719
720 fd = FD(leader, cpu, thread);
721 BUG_ON(fd == -1);
722
723 return fd;
724}
725
0252208e 726static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
6a4bb04c 727 struct thread_map *threads)
48290609 728{
0252208e 729 int cpu, thread;
023695d9 730 unsigned long flags = 0;
727ab04e 731 int pid = -1, err;
48290609 732
0252208e
ACM
733 if (evsel->fd == NULL &&
734 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
727ab04e 735 return -ENOMEM;
4eed11d5 736
023695d9
SE
737 if (evsel->cgrp) {
738 flags = PERF_FLAG_PID_CGROUP;
739 pid = evsel->cgrp->fd;
740 }
741
86bd5e86 742 for (cpu = 0; cpu < cpus->nr; cpu++) {
9d04f178 743
0252208e 744 for (thread = 0; thread < threads->nr; thread++) {
6a4bb04c 745 int group_fd;
023695d9
SE
746
747 if (!evsel->cgrp)
748 pid = threads->map[thread];
749
6a4bb04c
JO
750 group_fd = get_group_fd(evsel, cpu, thread);
751
0252208e 752 FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
023695d9 753 pid,
f08199d3 754 cpus->map[cpu],
023695d9 755 group_fd, flags);
727ab04e
ACM
756 if (FD(evsel, cpu, thread) < 0) {
757 err = -errno;
0252208e 758 goto out_close;
727ab04e 759 }
0252208e 760 }
48290609
ACM
761 }
762
763 return 0;
764
765out_close:
0252208e
ACM
766 do {
767 while (--thread >= 0) {
768 close(FD(evsel, cpu, thread));
769 FD(evsel, cpu, thread) = -1;
770 }
771 thread = threads->nr;
772 } while (--cpu >= 0);
727ab04e
ACM
773 return err;
774}
775
776void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
777{
778 if (evsel->fd == NULL)
779 return;
780
781 perf_evsel__close_fd(evsel, ncpus, nthreads);
782 perf_evsel__free_fd(evsel);
783 evsel->fd = NULL;
48290609
ACM
784}
785
0252208e
ACM
786static struct {
787 struct cpu_map map;
788 int cpus[1];
789} empty_cpu_map = {
790 .map.nr = 1,
791 .cpus = { -1, },
792};
793
794static struct {
795 struct thread_map map;
796 int threads[1];
797} empty_thread_map = {
798 .map.nr = 1,
799 .threads = { -1, },
800};
801
f08199d3 802int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
6a4bb04c 803 struct thread_map *threads)
48290609 804{
0252208e
ACM
805 if (cpus == NULL) {
806 /* Work around old compiler warnings about strict aliasing */
807 cpus = &empty_cpu_map.map;
48290609
ACM
808 }
809
0252208e
ACM
810 if (threads == NULL)
811 threads = &empty_thread_map.map;
48290609 812
6a4bb04c 813 return __perf_evsel__open(evsel, cpus, threads);
48290609
ACM
814}
815
f08199d3 816int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
6a4bb04c 817 struct cpu_map *cpus)
48290609 818{
6a4bb04c 819 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
0252208e 820}
48290609 821
f08199d3 822int perf_evsel__open_per_thread(struct perf_evsel *evsel,
6a4bb04c 823 struct thread_map *threads)
0252208e 824{
6a4bb04c 825 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
48290609 826}
70082dd9 827
0807d2d8
ACM
828static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
829 const union perf_event *event,
830 struct perf_sample *sample)
d0dd74e8 831{
0807d2d8 832 u64 type = evsel->attr.sample_type;
d0dd74e8 833 const u64 *array = event->sample.array;
0807d2d8 834 bool swapped = evsel->needs_swap;
37073f9e 835 union u64_swap u;
d0dd74e8
ACM
836
837 array += ((event->header.size -
838 sizeof(event->header)) / sizeof(u64)) - 1;
839
840 if (type & PERF_SAMPLE_CPU) {
37073f9e
JO
841 u.val64 = *array;
842 if (swapped) {
843 /* undo swap of u64, then swap on individual u32s */
844 u.val64 = bswap_64(u.val64);
845 u.val32[0] = bswap_32(u.val32[0]);
846 }
847
848 sample->cpu = u.val32[0];
d0dd74e8
ACM
849 array--;
850 }
851
852 if (type & PERF_SAMPLE_STREAM_ID) {
853 sample->stream_id = *array;
854 array--;
855 }
856
857 if (type & PERF_SAMPLE_ID) {
858 sample->id = *array;
859 array--;
860 }
861
862 if (type & PERF_SAMPLE_TIME) {
863 sample->time = *array;
864 array--;
865 }
866
867 if (type & PERF_SAMPLE_TID) {
37073f9e
JO
868 u.val64 = *array;
869 if (swapped) {
870 /* undo swap of u64, then swap on individual u32s */
871 u.val64 = bswap_64(u.val64);
872 u.val32[0] = bswap_32(u.val32[0]);
873 u.val32[1] = bswap_32(u.val32[1]);
874 }
875
876 sample->pid = u.val32[0];
877 sample->tid = u.val32[1];
d0dd74e8
ACM
878 }
879
880 return 0;
881}
882
98e1da90
FW
883static bool sample_overlap(const union perf_event *event,
884 const void *offset, u64 size)
885{
886 const void *base = event;
887
888 if (offset + size > base + event->header.size)
889 return true;
890
891 return false;
892}
893
a3f698fe 894int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
0807d2d8 895 struct perf_sample *data)
d0dd74e8 896{
a3f698fe 897 u64 type = evsel->attr.sample_type;
0f6a3015 898 u64 regs_user = evsel->attr.sample_regs_user;
0807d2d8 899 bool swapped = evsel->needs_swap;
d0dd74e8
ACM
900 const u64 *array;
901
936be503
DA
902 /*
903 * used for cross-endian analysis. See git commit 65014ab3
904 * for why this goofiness is needed.
905 */
6a11f92e 906 union u64_swap u;
936be503 907
f3bda2c9 908 memset(data, 0, sizeof(*data));
d0dd74e8
ACM
909 data->cpu = data->pid = data->tid = -1;
910 data->stream_id = data->id = data->time = -1ULL;
a4a03fc7 911 data->period = 1;
d0dd74e8
ACM
912
913 if (event->header.type != PERF_RECORD_SAMPLE) {
a3f698fe 914 if (!evsel->attr.sample_id_all)
d0dd74e8 915 return 0;
0807d2d8 916 return perf_evsel__parse_id_sample(evsel, event, data);
d0dd74e8
ACM
917 }
918
919 array = event->sample.array;
920
a3f698fe 921 if (evsel->sample_size + sizeof(event->header) > event->header.size)
a2854124
FW
922 return -EFAULT;
923
d0dd74e8
ACM
924 if (type & PERF_SAMPLE_IP) {
925 data->ip = event->ip.ip;
926 array++;
927 }
928
929 if (type & PERF_SAMPLE_TID) {
936be503
DA
930 u.val64 = *array;
931 if (swapped) {
932 /* undo swap of u64, then swap on individual u32s */
933 u.val64 = bswap_64(u.val64);
934 u.val32[0] = bswap_32(u.val32[0]);
935 u.val32[1] = bswap_32(u.val32[1]);
936 }
937
938 data->pid = u.val32[0];
939 data->tid = u.val32[1];
d0dd74e8
ACM
940 array++;
941 }
942
943 if (type & PERF_SAMPLE_TIME) {
944 data->time = *array;
945 array++;
946 }
947
7cec0922 948 data->addr = 0;
d0dd74e8
ACM
949 if (type & PERF_SAMPLE_ADDR) {
950 data->addr = *array;
951 array++;
952 }
953
954 data->id = -1ULL;
955 if (type & PERF_SAMPLE_ID) {
956 data->id = *array;
957 array++;
958 }
959
960 if (type & PERF_SAMPLE_STREAM_ID) {
961 data->stream_id = *array;
962 array++;
963 }
964
965 if (type & PERF_SAMPLE_CPU) {
936be503
DA
966
967 u.val64 = *array;
968 if (swapped) {
969 /* undo swap of u64, then swap on individual u32s */
970 u.val64 = bswap_64(u.val64);
971 u.val32[0] = bswap_32(u.val32[0]);
972 }
973
974 data->cpu = u.val32[0];
d0dd74e8
ACM
975 array++;
976 }
977
978 if (type & PERF_SAMPLE_PERIOD) {
979 data->period = *array;
980 array++;
981 }
982
983 if (type & PERF_SAMPLE_READ) {
f9d36996 984 fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
d0dd74e8
ACM
985 return -1;
986 }
987
988 if (type & PERF_SAMPLE_CALLCHAIN) {
98e1da90
FW
989 if (sample_overlap(event, array, sizeof(data->callchain->nr)))
990 return -EFAULT;
991
d0dd74e8 992 data->callchain = (struct ip_callchain *)array;
98e1da90
FW
993
994 if (sample_overlap(event, array, data->callchain->nr))
995 return -EFAULT;
996
d0dd74e8
ACM
997 array += 1 + data->callchain->nr;
998 }
999
1000 if (type & PERF_SAMPLE_RAW) {
8e303f20
JO
1001 const u64 *pdata;
1002
936be503
DA
1003 u.val64 = *array;
1004 if (WARN_ONCE(swapped,
1005 "Endianness of raw data not corrected!\n")) {
1006 /* undo swap of u64, then swap on individual u32s */
1007 u.val64 = bswap_64(u.val64);
1008 u.val32[0] = bswap_32(u.val32[0]);
1009 u.val32[1] = bswap_32(u.val32[1]);
1010 }
98e1da90
FW
1011
1012 if (sample_overlap(event, array, sizeof(u32)))
1013 return -EFAULT;
1014
936be503 1015 data->raw_size = u.val32[0];
8e303f20 1016 pdata = (void *) array + sizeof(u32);
98e1da90 1017
8e303f20 1018 if (sample_overlap(event, pdata, data->raw_size))
98e1da90
FW
1019 return -EFAULT;
1020
8e303f20 1021 data->raw_data = (void *) pdata;
fa30c964
SE
1022
1023 array = (void *)array + data->raw_size + sizeof(u32);
d0dd74e8
ACM
1024 }
1025
b5387528
RAV
1026 if (type & PERF_SAMPLE_BRANCH_STACK) {
1027 u64 sz;
1028
1029 data->branch_stack = (struct branch_stack *)array;
1030 array++; /* nr */
1031
1032 sz = data->branch_stack->nr * sizeof(struct branch_entry);
1033 sz /= sizeof(u64);
1034 array += sz;
1035 }
0f6a3015
JO
1036
1037 if (type & PERF_SAMPLE_REGS_USER) {
1038 /* First u64 tells us if we have any regs in sample. */
1039 u64 avail = *array++;
1040
1041 if (avail) {
1042 data->user_regs.regs = (u64 *)array;
1043 array += hweight_long(regs_user);
1044 }
1045 }
1046
1047 if (type & PERF_SAMPLE_STACK_USER) {
1048 u64 size = *array++;
1049
1050 data->user_stack.offset = ((char *)(array - 1)
1051 - (char *) event);
1052
1053 if (!size) {
1054 data->user_stack.size = 0;
1055 } else {
1056 data->user_stack.data = (char *)array;
1057 array += size / sizeof(*array);
1058 data->user_stack.size = *array;
1059 }
1060 }
1061
d0dd74e8
ACM
1062 return 0;
1063}
74eec26f
AV
1064
1065int perf_event__synthesize_sample(union perf_event *event, u64 type,
1066 const struct perf_sample *sample,
1067 bool swapped)
1068{
1069 u64 *array;
1070
1071 /*
1072 * used for cross-endian analysis. See git commit 65014ab3
1073 * for why this goofiness is needed.
1074 */
6a11f92e 1075 union u64_swap u;
74eec26f
AV
1076
1077 array = event->sample.array;
1078
1079 if (type & PERF_SAMPLE_IP) {
1080 event->ip.ip = sample->ip;
1081 array++;
1082 }
1083
1084 if (type & PERF_SAMPLE_TID) {
1085 u.val32[0] = sample->pid;
1086 u.val32[1] = sample->tid;
1087 if (swapped) {
1088 /*
a3f698fe 1089 * Inverse of what is done in perf_evsel__parse_sample
74eec26f
AV
1090 */
1091 u.val32[0] = bswap_32(u.val32[0]);
1092 u.val32[1] = bswap_32(u.val32[1]);
1093 u.val64 = bswap_64(u.val64);
1094 }
1095
1096 *array = u.val64;
1097 array++;
1098 }
1099
1100 if (type & PERF_SAMPLE_TIME) {
1101 *array = sample->time;
1102 array++;
1103 }
1104
1105 if (type & PERF_SAMPLE_ADDR) {
1106 *array = sample->addr;
1107 array++;
1108 }
1109
1110 if (type & PERF_SAMPLE_ID) {
1111 *array = sample->id;
1112 array++;
1113 }
1114
1115 if (type & PERF_SAMPLE_STREAM_ID) {
1116 *array = sample->stream_id;
1117 array++;
1118 }
1119
1120 if (type & PERF_SAMPLE_CPU) {
1121 u.val32[0] = sample->cpu;
1122 if (swapped) {
1123 /*
a3f698fe 1124 * Inverse of what is done in perf_evsel__parse_sample
74eec26f
AV
1125 */
1126 u.val32[0] = bswap_32(u.val32[0]);
1127 u.val64 = bswap_64(u.val64);
1128 }
1129 *array = u.val64;
1130 array++;
1131 }
1132
1133 if (type & PERF_SAMPLE_PERIOD) {
1134 *array = sample->period;
1135 array++;
1136 }
1137
1138 return 0;
1139}
5555ded4 1140
efd2b924
ACM
1141struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
1142{
1143 return pevent_find_field(evsel->tp_format, name);
1144}
1145
5d2074ea 1146void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
5555ded4
ACM
1147 const char *name)
1148{
efd2b924 1149 struct format_field *field = perf_evsel__field(evsel, name);
5555ded4
ACM
1150 int offset;
1151
efd2b924
ACM
1152 if (!field)
1153 return NULL;
5555ded4
ACM
1154
1155 offset = field->offset;
1156
1157 if (field->flags & FIELD_IS_DYNAMIC) {
1158 offset = *(int *)(sample->raw_data + field->offset);
1159 offset &= 0xffff;
1160 }
1161
1162 return sample->raw_data + offset;
1163}
1164
1165u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
1166 const char *name)
1167{
efd2b924 1168 struct format_field *field = perf_evsel__field(evsel, name);
e6b6f679
ACM
1169 void *ptr;
1170 u64 value;
5555ded4 1171
efd2b924
ACM
1172 if (!field)
1173 return 0;
5555ded4 1174
e6b6f679 1175 ptr = sample->raw_data + field->offset;
5555ded4 1176
e6b6f679
ACM
1177 switch (field->size) {
1178 case 1:
1179 return *(u8 *)ptr;
1180 case 2:
1181 value = *(u16 *)ptr;
1182 break;
1183 case 4:
1184 value = *(u32 *)ptr;
1185 break;
1186 case 8:
1187 value = *(u64 *)ptr;
1188 break;
1189 default:
1190 return 0;
1191 }
1192
1193 if (!evsel->needs_swap)
1194 return value;
1195
1196 switch (field->size) {
1197 case 2:
1198 return bswap_16(value);
1199 case 4:
1200 return bswap_32(value);
1201 case 8:
1202 return bswap_64(value);
1203 default:
1204 return 0;
1205 }
1206
1207 return 0;
5555ded4 1208}
This page took 0.156668 seconds and 5 git commands to generate.