/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/err.h>
#include <sys/resource.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
	bool write_backward;
} perf_missing_features;

static clockid_t clockid;
static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}
static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};
int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
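/*
 * Tools that embed a perf_evsel inside a larger private struct register
 * the bigger size plus their own init/fini here. A rough sketch (the
 * struct and callback names below are illustrative, not from this file):
 *
 *	struct my_evsel {
 *		struct perf_evsel evsel;
 *		int		  my_state;
 *	};
 *
 *	perf_evsel__object_config(sizeof(struct my_evsel),
 *				  my_evsel__init, my_evsel__fini);
 *
 * After this, perf_evsel__new_idx() allocates perf_evsel__object.size
 * bytes and calls the registered init on every new evsel.
 */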
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
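/*
 * For example, a sample_type of PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME has three bits set below PERF_SAMPLE_MASK, so the
 * fixed part of each sample is 3 * sizeof(u64) = 24 bytes.
 */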
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
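/*
 * Worked example: with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID, the id is the third u64 in the sample array, so
 * id_pos is 2; with PERF_SAMPLE_IDENTIFIER the id is always first.
 */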
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->evlist	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
}
struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		evsel->attr.sample_period = 1;
	}

	return evsel;
}
/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
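/*
 * A minimal usage sketch (assuming the perf_evsel__newtp() wrapper from
 * evsel.h, which passes idx == 0):
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 *
 * Note the IS_ERR() check: on failure an encoded error pointer is
 * returned, not NULL.
 */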
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		}							\
	} while (0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",	"bpc",		},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
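/*
 * For example, COP(C(OP_WRITE)) is not set for C(L1I) above, so an
 * "L1-icache-store"-style event is rejected as invalid, while
 * "L1-dcache-store" passes the check.
 */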
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
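/*
 * The config layout decoded above, e.g. for an L1D read-miss event:
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which the code above renders as "L1-dcache-load-misses".
 */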
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
void perf_evsel__config_callchain(struct perf_evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}
static void apply_config_terms(struct perf_evsel *evsel,
			       struct record_opts *opts)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->attr;
	struct callchain_param param;
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	/* callgraph default */
	param.record_mode = callchain_param.record_mode;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			attr->sample_period = term->val.period;
			attr->freq = 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			attr->sample_freq = term->val.freq;
			attr->freq = 1;
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If user explicitly set
			 * inherit using config terms, override global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled)
			perf_evsel__config_callchain(evsel, opts, &param);
	}
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *    Group members are ruled by group leaders. They need to
 *    be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *    Independent events and group leaders are initially disabled
 *    and get enabled by exec. Group members are ruled by group
 *    leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *    When attaching events to an already running traced program we
 *    enable/disable events specifically, as there's no
 *    initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}
	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing the page
	 * fault handler and its overall trickiness.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);
	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		perf_event_attr__set_max_precise_ip(attr);

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Apply event-specific term settings; these override any
	 * global configuration.
	 */
	apply_config_terms(evsel, opts);
}
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
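/*
 * The fd xyarray is indexed [cpu][thread]; FD(evsel, cpu, thread) names
 * the slot that later holds the perf_event_open() fd for that cpu/thread
 * pair (-1 while unopened). A system-wide evsel collapses the thread
 * dimension to a single column.
 */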
static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			     const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}
int perf_evsel__append_filter(struct perf_evsel *evsel,
			      const char *op, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, "(%s) %s (%s)", evsel->filter, op, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}
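/*
 * E.g. appending with op "&&" to an existing filter "pid == 1" yields
 * "(pid == 1) && (comm != init)", so both tracepoint filter expressions
 * must hold.
 */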
int perf_evsel__enable(struct perf_evsel *evsel)
{
	int nthreads = thread_map__nr(evsel->threads);
	int ncpus = cpu_map__nr(evsel->cpus);

	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int nthreads = thread_map__nr(evsel->threads);
	int ncpus = cpu_map__nr(evsel->cpus);

	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_DISABLE,
				     0);
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del(&term->list);
		free(term);
	}
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
	close_cgroup(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	cpu_map__put(evsel->own_cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}
void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
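/*
 * This is the usual multiplexing correction: if the event was scheduled
 * on the PMU for only part of the enabled time, the raw count is scaled
 * up by ena/run. E.g. val = 1000, ena = 200ms, run = 100ms gives a
 * scaled estimate of 2000, with *pscaled = 1 marking the extrapolation.
 */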
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
		return -errno;

	return 0;
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}
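/*
 * nv above matches the read(2) layout for a counter opened with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING:
 * three u64s { value, time_enabled, time_running } when scaling,
 * otherwise just { value }.
 */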
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}
static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
		bit_name(WEIGHT),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
	struct bit_names bits[] = {
		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);
	PRINT_ATTRf(write_backward, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);
	PRINT_ATTRf(sample_max_stack, p_unsigned);

	return ret;
}
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}
fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
						    PERF_SAMPLE_BRANCH_NO_CYCLES);
	if (perf_missing_features.write_backward)
		evsel->attr.write_backward = false;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}
	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("sys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}

			if (evsel->bpf_fd >= 0) {
				int evt_fd = FD(evsel, cpu, thread);
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;
			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error message.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}

			if (evsel->overwrite &&
			    perf_missing_features.write_backward) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}
	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	} else if (!perf_missing_features.lbr_flags &&
		   (evsel->attr.branch_sample_type &
		    (PERF_SAMPLE_BRANCH_NO_CYCLES |
		     PERF_SAMPLE_BRANCH_NO_FLAGS))) {
		perf_missing_features.lbr_flags = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.write_backward &&
		   evsel->attr.write_backward) {
		perf_missing_features.write_backward = true;
		goto fallback_missing_features;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}
static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;

	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;
	data->id = -1ULL;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}
	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}
	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}
	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}
	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}
	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}
	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	return 0;
}
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
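/*
 * A typical use from a tracepoint handler (a sketch; assumes an evsel
 * created for sched:sched_switch and an already parsed sample):
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 *	char *comm   = perf_evsel__rawptr(evsel, sample, "prev_comm");
 *
 * Field sizes and byte order are handled above, so callers just name
 * the tracepoint field.
 */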
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;

		if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl_perf_event_max_stack);
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.sample_period != 0)
			return scnprintf(msg, size, "%s",
	"PMU Hardware doesn't support sampling/overflow-interrupts.");
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}