perf evsel: Introduce event fallback method
[deliverable/linux.git] / tools / perf / util / evsel.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include "asm/bug.h"
#include "debugfs.h"
#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include "perf_regs.h"

static struct {
        bool sample_id_all;
        bool exclude_guest;
} perf_missing_features;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
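
/*
 * FD() yields the lvalue of the fd that sys_perf_event_open() returned for
 * a given (cpu, thread) pair; evsel->fd is an xyarray sized ncpus * nthreads,
 * so e.g. FD(evsel, 0, 2) is the counter fd on the first cpu for the third
 * mapped thread.
 */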

static int __perf_evsel__sample_size(u64 sample_type)
{
        u64 mask = sample_type & PERF_SAMPLE_MASK;
        int size = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & (1ULL << i))
                        size++;
        }

        size *= sizeof(u64);

        return size;
}
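
/*
 * Example: sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME
 * has three bits set within PERF_SAMPLE_MASK, so the fixed portion of each
 * sample is 3 * sizeof(u64) = 24 bytes; variable sized parts (RAW,
 * CALLCHAIN, ...) fall outside the mask and are not accounted here.
 */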

void hists__init(struct hists *hists)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
                                  enum perf_event_sample_format bit)
{
        if (!(evsel->attr.sample_type & bit)) {
                evsel->attr.sample_type |= bit;
                evsel->sample_size += sizeof(u64);
        }
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
                                    enum perf_event_sample_format bit)
{
        if (evsel->attr.sample_type & bit) {
                evsel->attr.sample_type &= ~bit;
                evsel->sample_size -= sizeof(u64);
        }
}
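
/*
 * These two are normally reached via the perf_evsel__{set,reset}_sample_bit()
 * wrappers in evsel.h, which paste the PERF_SAMPLE_ prefix, so e.g.
 * perf_evsel__set_sample_bit(evsel, TID) expands to
 * __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_TID), keeping
 * evsel->sample_size coherent with attr.sample_type.
 */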

void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
        perf_evsel__set_sample_bit(evsel, ID);
        evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
{
        evsel->idx = idx;
        evsel->attr = *attr;
        evsel->leader = evsel;
        INIT_LIST_HEAD(&evsel->node);
        hists__init(&evsel->hists);
        evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL)
                perf_evsel__init(evsel, attr, idx);

        return evsel;
}

struct event_format *event_format__new(const char *sys, const char *name)
{
        int fd, n;
        char *filename;
        void *bf = NULL, *nbf;
        size_t size = 0, alloc_size = 0;
        struct event_format *format = NULL;

        if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
                goto out;

        fd = open(filename, O_RDONLY);
        if (fd < 0)
                goto out_free_filename;

        do {
                if (size == alloc_size) {
                        alloc_size += BUFSIZ;
                        nbf = realloc(bf, alloc_size);
                        if (nbf == NULL)
                                goto out_free_bf;
                        bf = nbf;
                }

                n = read(fd, bf + size, BUFSIZ);
                if (n < 0)
                        goto out_free_bf;
                size += n;
        } while (n > 0);

        pevent_parse_format(&format, bf, size, sys);

out_free_bf:
        free(bf);
        close(fd);
out_free_filename:
        free(filename);
out:
        return format;
}

struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL) {
                struct perf_event_attr attr = {
                        .type        = PERF_TYPE_TRACEPOINT,
                        .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                        PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
                };

                if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
                        goto out_free;

                evsel->tp_format = event_format__new(sys, name);
                if (evsel->tp_format == NULL)
                        goto out_free;

                event_attr_init(&attr);
                attr.config = evsel->tp_format->id;
                attr.sample_period = 1;
                perf_evsel__init(evsel, &attr, idx);
        }

        return evsel;

out_free:
        free(evsel->name);
        free(evsel);
        return NULL;
}
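
/*
 * Illustrative use (error handling elided), e.g. to count scheduler
 * switches:
 *
 *        struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
 *
 * On return evsel->attr.config holds the tracepoint id parsed from the
 * sched/sched_switch/format file under tracing_events_path.
 */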

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
        "cycles",
        "instructions",
        "cache-references",
        "cache-misses",
        "branches",
        "branch-misses",
        "bus-cycles",
        "stalled-cycles-frontend",
        "stalled-cycles-backend",
        "ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
        if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
                return perf_evsel__hw_names[config];

        return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
        int colon = 0, r = 0;
        struct perf_event_attr *attr = &evsel->attr;
        bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {                                    \
                if (!attr->exclude_##context) {                         \
                        if (!colon) colon = ++r;                        \
                        r += scnprintf(bf + r, size - r, "%c", mod);    \
                } } while(0)

        if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
                MOD_PRINT(kernel, 'k');
                MOD_PRINT(user, 'u');
                MOD_PRINT(hv, 'h');
                exclude_guest_default = true;
        }

        if (attr->precise_ip) {
                if (!colon)
                        colon = ++r;
                r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
                exclude_guest_default = true;
        }

        if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
                MOD_PRINT(host, 'H');
                MOD_PRINT(guest, 'G');
        }
#undef MOD_PRINT
        if (colon)
                bf[colon - 1] = ':';
        return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
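
/*
 * Example: attr.config == PERF_COUNT_HW_CPU_CYCLES with exclude_user and
 * exclude_hv set and precise_ip == 2 prints as "cycles:kpp", matching the
 * modifier syntax accepted by the event parser.
 */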

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
        "cpu-clock",
        "task-clock",
        "page-faults",
        "context-switches",
        "cpu-migrations",
        "minor-faults",
        "major-faults",
        "alignment-faults",
        "emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
        if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
                return perf_evsel__sw_names[config];
        return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
        int r;

        r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

        if (type & HW_BREAKPOINT_R)
                r += scnprintf(bf + r, size - r, "r");

        if (type & HW_BREAKPOINT_W)
                r += scnprintf(bf + r, size - r, "w");

        if (type & HW_BREAKPOINT_X)
                r += scnprintf(bf + r, size - r, "x");

        return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        struct perf_event_attr *attr = &evsel->attr;
        int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                                [PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache", "l1-d",     "l1d",      "L1-data",        },
 { "L1-icache", "l1-i",     "l1i",      "L1-instruction", },
 { "LLC",       "L2",                                     },
 { "dTLB",      "d-tlb",    "Data-TLB",                   },
 { "iTLB",      "i-tlb",    "Instruction-TLB",            },
 { "branch",    "branches", "bpu",      "btb",     "bpc", },
 { "node",                                                },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                                   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",     "loads",      "read",                                     },
 { "store",    "stores",     "write",                                    },
 { "prefetch", "prefetches", "speculative-read", "speculative-load",     },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                                       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",   "Reference", "ops", "access", },
 { "misses", "miss",                       },
};

#define C(x)            PERF_COUNT_HW_CACHE_##x
#define CACHE_READ      (1 << C(OP_READ))
#define CACHE_WRITE     (1 << C(OP_WRITE))
#define CACHE_PREFETCH  (1 << C(OP_PREFETCH))
#define COP(x)          (1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)]  = (CACHE_READ),
 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
        if (perf_evsel__hw_cache_stat[type] & COP(op))
                return true;    /* valid */
        else
                return false;   /* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size)
{
        if (result) {
                return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
                                 perf_evsel__hw_cache_op[op][0],
                                 perf_evsel__hw_cache_result[result][0]);
        }

        return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
                         perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
        u8 op, result, type = (config >>  0) & 0xff;
        const char *err = "unknown-ext-hardware-cache-type";

        if (type >= PERF_COUNT_HW_CACHE_MAX)
                goto out_err;

        op = (config >>  8) & 0xff;
        err = "unknown-ext-hardware-cache-op";
        if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
                goto out_err;

        result = (config >> 16) & 0xff;
        err = "unknown-ext-hardware-cache-result";
        if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                goto out_err;

        err = "invalid-cache";
        if (!perf_evsel__is_cache_op_valid(type, op))
                goto out_err;

        return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
        return scnprintf(bf, size, "%s", err);
}
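
/*
 * The config value is decoded as three bytes, following the kernel's
 * PERF_TYPE_HW_CACHE encoding:
 *
 *        config = type | (op << 8) | (result << 16)
 *
 * e.g. type L1D, op READ, result MISS comes out via
 * __perf_evsel__hw_cache_type_op_res_name() as "L1-dcache-load-misses".
 */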

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
        char bf[128];

        if (evsel->name)
                return evsel->name;

        switch (evsel->attr.type) {
        case PERF_TYPE_RAW:
                perf_evsel__raw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_HARDWARE:
                perf_evsel__hw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_HW_CACHE:
                perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_SOFTWARE:
                perf_evsel__sw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_TRACEPOINT:
                scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
                break;

        case PERF_TYPE_BREAKPOINT:
                perf_evsel__bp_name(evsel, bf, sizeof(bf));
                break;

        default:
                scnprintf(bf, sizeof(bf), "unknown attr type: %d",
                          evsel->attr.type);
                break;
        }

        evsel->name = strdup(bf);

        return evsel->name ?: "unknown";
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *   - all independent events and group leaders are disabled
 *   - all group members are enabled
 *
 *   Group members are ruled by group leaders. They need to
 *   be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *   - all independent events and group leaders have
 *     enable_on_exec set
 *   - we don't specifically enable or disable any event during
 *     the record command
 *
 *   Independent events and group leaders are initially disabled
 *   and get enabled by exec. Group members are ruled by group
 *   leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *   - we specifically enable or disable all events during
 *     the record command
 *
 *   When attaching events to an already running traced program
 *   we enable/disable events specifically, as there's no
 *   initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel,
                        struct perf_record_opts *opts)
{
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */

        attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
        attr->inherit       = !opts->no_inherit;

        perf_evsel__set_sample_bit(evsel, IP);
        perf_evsel__set_sample_bit(evsel, TID);

        /*
         * We default some events to a period of 1, but keep this as a weak
         * assumption that the user can override.
         */
        if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
                        perf_evsel__set_sample_bit(evsel, PERIOD);
                        attr->freq        = 1;
                        attr->sample_freq = opts->freq;
                } else {
                        attr->sample_period = opts->default_interval;
                }
        }

        if (opts->no_samples)
                attr->sample_freq = 0;

        if (opts->inherit_stat)
                attr->inherit_stat = 1;

        if (opts->sample_address) {
                perf_evsel__set_sample_bit(evsel, ADDR);
                attr->mmap_data = track;
        }

        if (opts->call_graph) {
                perf_evsel__set_sample_bit(evsel, CALLCHAIN);

                if (opts->call_graph == CALLCHAIN_DWARF) {
                        perf_evsel__set_sample_bit(evsel, REGS_USER);
                        perf_evsel__set_sample_bit(evsel, STACK_USER);
                        attr->sample_regs_user = PERF_REGS_MASK;
                        attr->sample_stack_user = opts->stack_dump_size;
                        attr->exclude_callchain_user = 1;
                }
        }

        if (perf_target__has_cpu(&opts->target))
                perf_evsel__set_sample_bit(evsel, CPU);

        if (opts->period)
                perf_evsel__set_sample_bit(evsel, PERIOD);

        if (!perf_missing_features.sample_id_all &&
            (opts->sample_time || !opts->no_inherit ||
             perf_target__has_cpu(&opts->target)))
                perf_evsel__set_sample_bit(evsel, TIME);

        if (opts->raw_samples) {
                perf_evsel__set_sample_bit(evsel, TIME);
                perf_evsel__set_sample_bit(evsel, RAW);
                perf_evsel__set_sample_bit(evsel, CPU);
        }

        if (opts->no_delay) {
                attr->watermark = 0;
                attr->wakeup_events = 1;
        }
        if (opts->branch_stack) {
                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type = opts->branch_stack;
        }

        attr->mmap = track;
        attr->comm = track;

        /*
         * XXX see the function comment above
         *
         * Disabling only independent events or group leaders,
         * keeping group members enabled.
         */
        if (perf_evsel__is_group_leader(evsel))
                attr->disabled = 1;

        /*
         * Setting enable_on_exec for independent events and
         * group leaders for traced programs executed by perf.
         */
        if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
                attr->enable_on_exec = 1;
}
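
/*
 * Illustrative outcome (not exhaustive): for a default "perf record
 * ./workload" style invocation, opts->freq is set, so the event gains
 * PERF_SAMPLE_PERIOD with attr.freq = 1 and attr.sample_freq = opts->freq,
 * and since the target is execed by perf and the evsel leads its group,
 * enable_on_exec is set rather than enabling the event by hand.
 */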

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;
        evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

        if (evsel->fd) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
                                FD(evsel, cpu, thread) = -1;
                        }
                }
        }

        return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
                           const char *filter)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        int fd = FD(evsel, cpu, thread),
                            err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);

                        if (err)
                                return err;
                }
        }

        return 0;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
        if (evsel->sample_id == NULL)
                return -ENOMEM;

        evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
        if (evsel->id == NULL) {
                xyarray__delete(evsel->sample_id);
                evsel->sample_id = NULL;
                return -ENOMEM;
        }

        return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
        evsel->counts = zalloc((sizeof(*evsel->counts) +
                                (ncpus * sizeof(struct perf_counts_values))));
        return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->fd);
        evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->sample_id);
        evsel->sample_id = NULL;
        free(evsel->id);
        evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++)
                for (thread = 0; thread < nthreads; ++thread) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
        assert(list_empty(&evsel->node));
        xyarray__delete(evsel->fd);
        xyarray__delete(evsel->sample_id);
        free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
        perf_evsel__exit(evsel);
        close_cgroup(evsel->cgrp);
        free(evsel->group_name);
        if (evsel->tp_format)
                pevent_free_format(evsel->tp_format);
        free(evsel->name);
        free(evsel);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)
{
        struct perf_counts_values count;
        size_t nv = scale ? 3 : 1;

        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;

        if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
                return -ENOMEM;

        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;

        if (scale) {
                if (count.run == 0)
                        count.val = 0;
                else if (count.run < count.ena)
                        count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
        } else
                count.ena = count.run = 0;

        evsel->counts->cpu[cpu] = count;
        return 0;
}
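
/*
 * The scaling above compensates for event multiplexing: when the PMU was
 * oversubscribed, the kernel reports time_enabled (ena) and time_running
 * (run) next to the raw value, and
 *
 *        count.val = val * ena / run        (the + 0.5 rounds to nearest)
 *
 * extrapolates the observed rate over the whole enabled window; run == 0
 * means the counter never got onto the PMU, so the value is forced to 0.
 */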

int __perf_evsel__read(struct perf_evsel *evsel,
                       int ncpus, int nthreads, bool scale)
{
        size_t nv = scale ? 3 : 1;
        int cpu, thread;
        struct perf_counts_values *aggr = &evsel->counts->aggr, count;

        aggr->val = aggr->ena = aggr->run = 0;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        if (FD(evsel, cpu, thread) < 0)
                                continue;

                        if (readn(FD(evsel, cpu, thread),
                                  &count, nv * sizeof(u64)) < 0)
                                return -errno;

                        aggr->val += count.val;
                        if (scale) {
                                aggr->ena += count.ena;
                                aggr->run += count.run;
                        }
                }
        }

        evsel->counts->scaled = 0;
        if (scale) {
                if (aggr->run == 0) {
                        evsel->counts->scaled = -1;
                        aggr->val = 0;
                        return 0;
                }

                if (aggr->run < aggr->ena) {
                        evsel->counts->scaled = 1;
                        aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
                }
        } else
                aggr->ena = aggr->run = 0;

        return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
        struct perf_evsel *leader = evsel->leader;
        int fd;

        if (perf_evsel__is_group_leader(evsel))
                return -1;

        /*
         * Leader must be already processed/open,
         * if not it's a bug.
         */
        BUG_ON(!leader->fd);

        fd = FD(leader, cpu, thread);
        BUG_ON(fd == -1);

        return fd;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads)
{
        int cpu, thread;
        unsigned long flags = 0;
        int pid = -1, err;

        if (evsel->fd == NULL &&
            perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
                return -ENOMEM;

        if (evsel->cgrp) {
                flags = PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
        }

fallback_missing_features:
        if (perf_missing_features.exclude_guest)
                evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
        if (perf_missing_features.sample_id_all)
                evsel->attr.sample_id_all = 0;

        for (cpu = 0; cpu < cpus->nr; cpu++) {

                for (thread = 0; thread < threads->nr; thread++) {
                        int group_fd;

                        if (!evsel->cgrp)
                                pid = threads->map[thread];

                        group_fd = get_group_fd(evsel, cpu, thread);

                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
                                                                     pid,
                                                                     cpus->map[cpu],
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0) {
                                err = -errno;
                                goto try_fallback;
                        }
                }
        }

        return 0;

try_fallback:
        if (err != -EINVAL || cpu > 0 || thread > 0)
                goto out_close;

        if (!perf_missing_features.exclude_guest &&
            (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
                perf_missing_features.exclude_guest = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.sample_id_all) {
                perf_missing_features.sample_id_all = true;
                goto retry_sample_id;
        }

out_close:
        do {
                while (--thread >= 0) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
                thread = threads->nr;
        } while (--cpu >= 0);
        return err;
}
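
/*
 * The try_fallback path above doubles as runtime feature probing: an
 * EINVAL on the very first fd is taken to mean the running kernel
 * rejected a newer attr field, so the corresponding perf_missing_features
 * flag is set (first exclude_guest/exclude_host, then sample_id_all),
 * the offending bits are cleared and the open is retried. The flags are
 * global, so subsequent opens avoid the unsupported features up front.
 */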

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        if (evsel->fd == NULL)
                return;

        perf_evsel__close_fd(evsel, ncpus, nthreads);
        perf_evsel__free_fd(evsel);
        evsel->fd = NULL;
}

static struct {
        struct cpu_map map;
        int cpus[1];
} empty_cpu_map = {
        .map.nr = 1,
        .cpus   = { -1, },
};

static struct {
        struct thread_map map;
        int threads[1];
} empty_thread_map = {
        .map.nr  = 1,
        .threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads)
{
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
                cpus = &empty_cpu_map.map;
        }

        if (threads == NULL)
                threads = &empty_thread_map.map;

        return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus)
{
        return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads)
{
        return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
                                       const union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 type = evsel->attr.sample_type;
        const u64 *array = event->sample.array;
        bool swapped = evsel->needs_swap;
        union u64_swap u;

        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;

        if (type & PERF_SAMPLE_CPU) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                sample->cpu = u.val32[0];
                array--;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                sample->stream_id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_ID) {
                sample->id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TIME) {
                sample->time = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                sample->pid = u.val32[0];
                sample->tid = u.val32[1];
        }

        return 0;
}
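
/*
 * Note the backward walk above: with attr.sample_id_all set the kernel
 * appends these id fields at the *end* of non-sample records, so parsing
 * starts at the last u64 of the record and moves toward the header,
 * consuming fields in the reverse of their sample_type bit order.
 */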

static bool sample_overlap(const union perf_event *event,
                           const void *offset, u64 size)
{
        const void *base = event;

        if (offset + size > base + event->header.size)
                return true;

        return false;
}

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                             struct perf_sample *data)
{
        u64 type = evsel->attr.sample_type;
        u64 regs_user = evsel->attr.sample_regs_user;
        bool swapped = evsel->needs_swap;
        const u64 *array;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        memset(data, 0, sizeof(*data));
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
        data->period = 1;

        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!evsel->attr.sample_id_all)
                        return 0;
                return perf_evsel__parse_id_sample(evsel, event, data);
        }

        array = event->sample.array;

        if (evsel->sample_size + sizeof(event->header) > event->header.size)
                return -EFAULT;

        if (type & PERF_SAMPLE_IP) {
                data->ip = event->ip.ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                data->pid = u.val32[0];
                data->tid = u.val32[1];
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                data->time = *array;
                array++;
        }

        data->addr = 0;
        if (type & PERF_SAMPLE_ADDR) {
                data->addr = *array;
                array++;
        }

        data->id = -1ULL;
        if (type & PERF_SAMPLE_ID) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                data->stream_id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {

                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                data->cpu = u.val32[0];
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
                array++;
        }

        if (type & PERF_SAMPLE_READ) {
                fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
                return -1;
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                if (sample_overlap(event, array, sizeof(data->callchain->nr)))
                        return -EFAULT;

                data->callchain = (struct ip_callchain *)array;

                if (sample_overlap(event, array, data->callchain->nr))
                        return -EFAULT;

                array += 1 + data->callchain->nr;
        }

        if (type & PERF_SAMPLE_RAW) {
                const u64 *pdata;

                u.val64 = *array;
                if (WARN_ONCE(swapped,
                              "Endianness of raw data not corrected!\n")) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                if (sample_overlap(event, array, sizeof(u32)))
                        return -EFAULT;

                data->raw_size = u.val32[0];
                pdata = (void *) array + sizeof(u32);

                if (sample_overlap(event, pdata, data->raw_size))
                        return -EFAULT;

                data->raw_data = (void *) pdata;

                array = (void *)array + data->raw_size + sizeof(u32);
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                u64 sz;

                data->branch_stack = (struct branch_stack *)array;
                array++; /* nr */

                sz = data->branch_stack->nr * sizeof(struct branch_entry);
                sz /= sizeof(u64);
                array += sz;
        }

        if (type & PERF_SAMPLE_REGS_USER) {
                /* First u64 tells us if we have any regs in sample. */
                u64 avail = *array++;

                if (avail) {
                        data->user_regs.regs = (u64 *)array;
                        array += hweight_long(regs_user);
                }
        }

        if (type & PERF_SAMPLE_STACK_USER) {
                u64 size = *array++;

                data->user_stack.offset = ((char *)(array - 1)
                                          - (char *) event);

                if (!size) {
                        data->user_stack.size = 0;
                } else {
                        data->user_stack.data = (char *)array;
                        array += size / sizeof(*array);
                        data->user_stack.size = *array;
                }
        }

        return 0;
}

int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  const struct perf_sample *sample,
                                  bool swapped)
{
        u64 *array;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        array = event->sample.array;

        if (type & PERF_SAMPLE_IP) {
                event->ip.ip = sample->ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val32[0] = sample->pid;
                u.val32[1] = sample->tid;
                if (swapped) {
                        /*
                         * Inverse of what is done in perf_evsel__parse_sample
                         */
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                        u.val64 = bswap_64(u.val64);
                }

                *array = u.val64;
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                *array = sample->time;
                array++;
        }

        if (type & PERF_SAMPLE_ADDR) {
                *array = sample->addr;
                array++;
        }

        if (type & PERF_SAMPLE_ID) {
                *array = sample->id;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                *array = sample->stream_id;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {
                u.val32[0] = sample->cpu;
                if (swapped) {
                        /*
                         * Inverse of what is done in perf_evsel__parse_sample
                         */
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val64 = bswap_64(u.val64);
                }
                *array = u.val64;
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                *array = sample->period;
                array++;
        }

        return 0;
}

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
        return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
                         const char *name)
{
        struct format_field *field = perf_evsel__field(evsel, name);
        int offset;

        if (!field)
                return NULL;

        offset = field->offset;

        if (field->flags & FIELD_IS_DYNAMIC) {
                offset = *(int *)(sample->raw_data + field->offset);
                offset &= 0xffff;
        }

        return sample->raw_data + offset;
}
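
/*
 * For FIELD_IS_DYNAMIC fields (e.g. __data_loc char[] tracepoint fields),
 * the field's slot holds a 32-bit descriptor instead of the data itself:
 * the low 16 bits, extracted by the 0xffff mask above, are the payload's
 * offset inside raw_data, while the high 16 bits carry its length (unused
 * here).
 */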

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
                       const char *name)
{
        struct format_field *field = perf_evsel__field(evsel, name);
        void *ptr;
        u64 value;

        if (!field)
                return 0;

        ptr = sample->raw_data + field->offset;

        switch (field->size) {
        case 1:
                return *(u8 *)ptr;
        case 2:
                value = *(u16 *)ptr;
                break;
        case 4:
                value = *(u32 *)ptr;
                break;
        case 8:
                value = *(u64 *)ptr;
                break;
        default:
                return 0;
        }

        if (!evsel->needs_swap)
                return value;

        switch (field->size) {
        case 2:
                return bswap_16(value);
        case 4:
                return bswap_32(value);
        case 8:
                return bswap_64(value);
        default:
                return 0;
        }
}
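
/*
 * Typical use from a tracepoint sample handler, assuming the event's
 * format defines these fields (illustrative):
 *
 *        u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 *        char *comm = perf_evsel__rawptr(evsel, sample, "prev_comm");
 *
 * A missing field reads as 0/NULL rather than failing hard.
 */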

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
        va_list args;
        int ret = 0;

        if (!*first) {
                ret += fprintf(fp, ",");
        } else {
                ret += fprintf(fp, ":");
                *first = false;
        }

        va_start(args, fmt);
        ret += vfprintf(fp, fmt, args);
        va_end(args);
        return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
        if (value == 0)
                return 0;

        return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

struct bit_names {
        int bit;
        const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
                         struct bit_names *bits, bool *first)
{
        int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
        bool first_bit = true;

        do {
                if (value & bits[i].bit) {
                        printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
                        first_bit = false;
                }
        } while (bits[++i].name != NULL);

        return printed;
}

static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
        struct bit_names bits[] = {
                bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
                bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
                bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
                bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
                { .name = NULL, }
        };
#undef bit_name
        return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
        struct bit_names bits[] = {
                bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
                bit_name(ID), bit_name(GROUP),
                { .name = NULL, }
        };
#undef bit_name
        return bits__fprintf(fp, "read_format", value, bits, first);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
                        struct perf_attr_details *details, FILE *fp)
{
        bool first = true;
        int printed = fprintf(fp, "%s", perf_evsel__name(evsel));

        if (details->verbose || details->freq) {
                printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
                                         (u64)evsel->attr.sample_freq);
        }

        if (details->verbose) {
                if_print(type);
                if_print(config);
                if_print(config1);
                if_print(config2);
                if_print(size);
                printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
                if (evsel->attr.read_format)
                        printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
                if_print(disabled);
                if_print(inherit);
                if_print(pinned);
                if_print(exclusive);
                if_print(exclude_user);
                if_print(exclude_kernel);
                if_print(exclude_hv);
                if_print(exclude_idle);
                if_print(mmap);
                if_print(comm);
                if_print(freq);
                if_print(inherit_stat);
                if_print(enable_on_exec);
                if_print(task);
                if_print(watermark);
                if_print(precise_ip);
                if_print(mmap_data);
                if_print(sample_id_all);
                if_print(exclude_host);
                if_print(exclude_guest);
                if_print(__reserved_1);
                if_print(wakeup_events);
                if_print(bp_type);
                if_print(branch_sample_type);
        }

        fputc('\n', fp);
        return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize)
{
        if ((err == ENOENT || err == ENXIO) &&
            evsel->attr.type == PERF_TYPE_HARDWARE &&
            evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
                /*
                 * If it's cycles then fall back to hrtimer based
                 * cpu-clock-tick sw counter, which is always available even if
                 * no PMU support.
                 *
                 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
                 * b0a873e).
                 */
                scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

                evsel->attr.type   = PERF_TYPE_SOFTWARE;
                evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

                free(evsel->name);
                evsel->name = NULL;
                return true;
        }

        return false;
}
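
/*
 * Sketch of the intended call site (cf. the record/top open loops),
 * assuming errno from a failed perf_evsel__open() is still at hand:
 *
 *        if (perf_evsel__open(evsel, cpus, threads) < 0) {
 *                if (perf_evsel__fallback(evsel, errno, msg, sizeof(msg))) {
 *                        ui__warning("%s\n", msg);
 *                        goto retry;        // reopen with the rewritten attr
 *                }
 *        }
 */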