/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/err.h>
#include <sys/resource.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
} perf_missing_features;

static clockid_t clockid;
static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};
int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
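/*
 * Illustrative only: a tool that embeds perf_evsel in a larger private
 * struct (the type and callbacks below are hypothetical) would register
 * it once at startup, before any evsel is allocated:
 *
 *	struct my_evsel {
 *		struct perf_evsel evsel;
 *		int		  my_state;
 *	};
 *
 *	perf_evsel__object_config(sizeof(struct my_evsel),
 *				  my_evsel__init, my_evsel__fini);
 */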
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
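/*
 * Illustrative only: with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME, three bits of PERF_SAMPLE_MASK are set, so
 * __perf_evsel__sample_size() returns 3 * sizeof(u64) == 24 bytes.
 */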
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
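/*
 * Illustrative only: for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the event id is the 4th u64 in a
 * sample, so id_pos is 3 (IP, TID and TIME precede it); if
 * PERF_SAMPLE_IDENTIFIER were set, id_pos would always be 0.
 */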
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->evlist	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
}
struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->attr.sample_period = 1;
	}

	return evsel;
}
/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
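/*
 * Illustrative use, assuming the perf_evsel__newtp() wrapper from evsel.h
 * that passes idx == 0; errors come back encoded via <linux/err.h>:
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */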
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",	"bpc",		},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type > PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op > PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
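/*
 * Illustrative only: the HW_CACHE config decoded above packs (type, op,
 * result) into one u64, e.g.:
 *
 *	u64 config = PERF_COUNT_HW_CACHE_L1D |
 *		     (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which __perf_evsel__hw_cache_name() renders as "L1-dcache-load-misses".
 */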
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
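/*
 * Illustrative only: for a leader "cycles" grouped with one member
 * "instructions" and no explicit group name, this fills buf with:
 *
 *	anon group { cycles, instructions }
 */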
void perf_evsel__config_callchain(struct perf_evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}
static void apply_config_terms(struct perf_evsel *evsel,
			       struct record_opts *opts)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->attr;
	struct callchain_param param;
	u32 dump_size = 0;
	char *callgraph_buf = NULL;

	/* callgraph default */
	param.record_mode = callchain_param.record_mode;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			attr->sample_period = term->val.period;
			attr->freq = 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			attr->sample_freq = term->val.freq;
			attr->freq = 1;
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If the user explicitly set
			 * inherit using config terms, override the global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0)) {

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled)
			perf_evsel__config_callchain(evsel, opts, &param);
	}
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *      - all independent events and group leaders are disabled
 *      - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *      - all independent events and group leaders have
 *        enable_on_exec set
 *      - we don't specifically enable or disable any event during
 *        the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *      - we specifically enable or disable all events during
 *        the record command
 *
 *     When attaching events to an already running traced process we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing the page
	 * fault handler and their overall trickiness.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		perf_event_attr__set_max_precise_ip(attr);

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Apply event-specific term settings; they override any
	 * global configuration.
	 */
	apply_config_terms(evsel, opts);
}
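/*
 * Illustrative only (hypothetical wiring): a record-style tool would
 * configure each evsel once after option parsing, e.g.:
 *
 *	struct record_opts opts = { .freq = 4000, .sample_time = true };
 *
 *	perf_evsel__config(evsel, &opts, &callchain_param);
 */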
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			     const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}
*evsel
, const char *filter
)
975 char *new_filter
= strdup(filter
);
977 if (new_filter
!= NULL
) {
979 evsel
->filter
= new_filter
;
int perf_evsel__append_filter(struct perf_evsel *evsel,
			      const char *op, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, "(%s) %s (%s)", evsel->filter, op, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}
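/*
 * Illustrative only: appending filter "common_pid != 0" with op "&&" to an
 * evsel whose current filter is "prev_comm ~ \"perf\"" produces:
 *
 *	(prev_comm ~ "perf") && (common_pid != 0)
 */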
int perf_evsel__enable(struct perf_evsel *evsel)
{
	int nthreads = thread_map__nr(evsel->threads);
	int ncpus = cpu_map__nr(evsel->cpus);

	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int nthreads = thread_map__nr(evsel->threads);
	int ncpus = cpu_map__nr(evsel->cpus);

	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_DISABLE,
				     0);
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}
static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del(&term->list);
		free(term);
	}
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
	close_cgroup(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	cpu_map__put(evsel->own_cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}
void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
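/*
 * Illustrative only: with val = 1000, ena = 200 and run = 100, the event
 * was scheduled in for half of the enabled time, so the scaled estimate is
 * 1000 * 200 / 100 + 0.5 -> val = 2000, and *pscaled is set to 1.
 */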
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
		return -errno;

	return 0;
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}
static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
		bit_name(WEIGHT),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
	struct bit_names bits[] = {
		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);
	PRINT_ATTRf(write_backward, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);

	return ret;
}
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
						    PERF_SAMPLE_BRANCH_NO_CYCLES);
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("sys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}

			if (evsel->bpf_fd >= 0) {
				int evt_fd = FD(evsel, cpu, thread);
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	} else if (!perf_missing_features.lbr_flags &&
		   (evsel->attr.branch_sample_type &
		    (PERF_SAMPLE_BRANCH_NO_CYCLES |
		     PERF_SAMPLE_BRANCH_NO_FLAGS))) {
		perf_missing_features.lbr_flags = true;
		goto fallback_missing_features;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
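/*
 * Illustrative only (hypothetical wiring): a caller typically opens an
 * event on a cpu/thread map pair and turns errno into a friendly message:
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0) {
 *		char msg[512];
 *
 *		perf_evsel__open_strerror(evsel, &opts.target, errno,
 *					  msg, sizeof(msg));
 *		pr_err("%s\n", msg);
 *	}
 */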
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}
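/*
 * Illustrative layout, assuming sample_id_all and sample_type =
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID: a non-sample event
 * carries a trailer of { TID, TIME, ID }, so the parser above starts at
 * the last u64 and walks backwards: ID first, then TIME, then TID.
 */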
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;

	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	return 0;
}
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
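/*
 * Illustrative tracepoint accessors (the field names are hypothetical and
 * depend on the event's format file):
 *
 *	u64 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 *	const char *next_comm = perf_evsel__rawptr(evsel, sample, "next_comm");
 */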
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;

		if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
			 "You may not have permission to collect %sstats.\n\n"
			 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
			 "which controls use of the performance events system by\n"
			 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
			 "The current value is %d:\n\n"
			 "  -1: Allow use of (almost) all events by all users\n"
			 ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
			 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
			 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
			 target->system_wide ? "system-wide " : "",
			 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
				 "Not enough memory to setup event with callchain.\n"
				 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
				 "Hint: Current value: %d", sysctl_perf_event_max_stack);
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}