4 #include "parse-events.h"
/* Callback used by the probe helpers below to tweak one evsel's attr. */
typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
/*
 * perf_do_probe_api - probe whether the running kernel accepts an event.
 *
 * Parses the event string 'str' into a throwaway evlist, lets 'fn' tweak
 * the first evsel's attr, then attempts sys_perf_event_open() on 'cpu'.
 *
 * NOTE(review): this extract is line-sampled and garbled; braces, error
 * paths, the body of the EACCES retry and the final return are missing —
 * TODO recover from the full source.  Code lines below are byte-identical.
 */
11 static int perf_do_probe_api(setup_probe_fn_t fn
, int cpu
, const char *str
)
/* Scratch evlist/evsel used only for this probe. */
13 struct perf_evlist
*evlist
;
14 struct perf_evsel
*evsel
;
/* Close-on-exec open flag (itself feature-probed elsewhere). */
15 unsigned long flags
= perf_event_open_cloexec_flag();
/* err starts at -EAGAIN so callers can retry with another event string. */
16 int err
= -EAGAIN
, fd
;
/* static: the pid chosen by the (missing) EACCES fallback is remembered
 * across calls — NOTE(review): confirm against full source. */
17 static pid_t pid
= -1;
19 evlist
= perf_evlist__new();
/* Parse 'str' (e.g. "cycles:u") into the temporary evlist. */
23 if (parse_events(evlist
, str
, NULL
))
26 evsel
= perf_evlist__first(evlist
);
/* First open attempt; 'fn' presumably tweaked evsel->attr between these
 * lines (missing from this extract). */
29 fd
= sys_perf_event_open(&evsel
->attr
, pid
, cpu
, -1, flags
);
/* EACCES with pid == -1 suggests perf_event_paranoid restrictions; the
 * (missing) lines switch to a per-thread target and retry below. */
31 if (pid
== -1 && errno
== EACCES
) {
43 fd
= sys_perf_event_open(&evsel
->attr
, pid
, cpu
, -1, flags
);
/* Always drop the temporary evlist before returning. */
53 perf_evlist__delete(evlist
);
/*
 * perf_probe_api - run perf_do_probe_api(fn, ...) over a list of cheap
 * user-space events until one is accepted or the list is exhausted.
 *
 * NOTE(review): line-sampled extract; the cpu selection, loop setup and
 * the boolean conversion of 'ret' are missing — TODO recover from the
 * full source.  Code lines below are byte-identical.
 */
57 static bool perf_probe_api(setup_probe_fn_t fn
)
/* Candidate event strings, tried in order; NULL-terminated sentinel. */
59 const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL
};
/* Probe on a cpu from the online-cpu map. */
63 cpus
= cpu_map__new(NULL
);
/* Try the next candidate event; 'i' advances past it either way. */
70 ret
= perf_do_probe_api(fn
, cpu
, try[i
++]);
/* -EAGAIN means "this event type unusable, try the next"; stop at NULL. */
73 } while (ret
== -EAGAIN
&& try[i
]);
78 static void perf_probe_sample_identifier(struct perf_evsel
*evsel
)
80 evsel
->attr
.sample_type
|= PERF_SAMPLE_IDENTIFIER
;
83 static void perf_probe_comm_exec(struct perf_evsel
*evsel
)
85 evsel
->attr
.comm_exec
= 1;
88 static void perf_probe_context_switch(struct perf_evsel
*evsel
)
90 evsel
->attr
.context_switch
= 1;
93 bool perf_can_sample_identifier(void)
95 return perf_probe_api(perf_probe_sample_identifier
);
98 static bool perf_can_comm_exec(void)
100 return perf_probe_api(perf_probe_comm_exec
);
103 bool perf_can_record_switch_events(void)
105 return perf_probe_api(perf_probe_context_switch
);
/*
 * perf_can_record_cpu_wide - check whether a system-wide (pid == -1)
 * software cpu-clock event can be opened on a real cpu.
 *
 * NOTE(review): line-sampled extract; the attr initializer tail, error
 * checks, close(fd) and the return value are missing — TODO recover from
 * the full source.  Code lines below are byte-identical.
 */
108 bool perf_can_record_cpu_wide(void)
/* Minimal software event: always available if permissions allow. */
110 struct perf_event_attr attr
= {
111 .type
= PERF_TYPE_SOFTWARE
,
112 .config
= PERF_COUNT_SW_CPU_CLOCK
,
115 struct cpu_map
*cpus
;
118 cpus
= cpu_map__new(NULL
);
/* pid == -1 with a concrete cpu requests a cpu-wide counter. */
124 fd
= sys_perf_event_open(&attr
, -1, cpu
, -1, 0);
/*
 * perf_evlist__config - apply record_opts and callchain settings to every
 * evsel in the evlist, deciding whether PERF_SAMPLE_IDENTIFIER is needed
 * to tell the events' samples apart.
 *
 * NOTE(review): line-sampled extract; braces, some declarations
 * (use_comm_exec) and parts of the comments are missing — TODO recover
 * from the full source.  Code lines below are byte-identical.
 */
132 void perf_evlist__config(struct perf_evlist
*evlist
, struct record_opts
*opts
,
133 struct callchain_param
*callchain
)
135 struct perf_evsel
*evsel
;
136 bool use_sample_identifier
= false;
140 * Set the evsel leader links before we configure attributes,
141 * since some might depend on this info.
144 perf_evlist__set_leader(evlist
);
/* A negative first map entry means "no cpus": per-thread mode, so
 * inheritance is disabled. */
146 if (evlist
->cpus
->map
[0] < 0)
147 opts
->no_inherit
= true;
/* Probe once whether the kernel can flag exec COMM events. */
149 use_comm_exec
= perf_can_comm_exec();
151 evlist__for_each_entry(evlist
, evsel
) {
152 perf_evsel__config(evsel
, opts
, callchain
);
/* Only the tracking evsel carries comm_exec, and only if supported. */
153 if (evsel
->tracking
&& use_comm_exec
)
154 evsel
->attr
.comm_exec
= 1;
157 if (opts
->full_auxtrace
) {
159 * Need to be able to synthesize and parse selected events with
160 * arbitrary sample types, which requires always being able to
163 use_sample_identifier
= perf_can_sample_identifier();
164 evlist__for_each_entry(evlist
, evsel
)
165 perf_evsel__set_sample_id(evsel
, use_sample_identifier
);
/* Multiple events: sample id only needed if their sample types differ. */
166 } else if (evlist
->nr_entries
> 1) {
167 struct perf_evsel
*first
= perf_evlist__first(evlist
);
169 evlist__for_each_entry(evlist
, evsel
) {
170 if (evsel
->attr
.sample_type
== first
->attr
.sample_type
)
172 use_sample_identifier
= perf_can_sample_identifier();
175 evlist__for_each_entry(evlist
, evsel
)
176 perf_evsel__set_sample_id(evsel
, use_sample_identifier
);
/* Recompute where the id sits in samples after the changes above. */
179 perf_evlist__set_id_pos(evlist
);
/*
 * Read the kernel's current maximum sampling rate from
 * /proc/sys/kernel/perf_event_max_sample_rate into *rate.
 * Returns sysctl__read_int()'s result (0 on success).
 * (Reconstructed: the extract dropped only the brace lines.)
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
/*
 * record_opts__config_freq - resolve the sampling frequency/period from
 * user options and clamp it against the kernel's max sample rate.
 *
 * NOTE(review): line-sampled extract; braces, early returns, the printed
 * rate arguments and the final return are missing — TODO recover from the
 * full source.  Code lines below are byte-identical.
 */
187 static int record_opts__config_freq(struct record_opts
*opts
)
/* UINT_MAX marks "user did not pass -F". */
189 bool user_freq
= opts
->user_freq
!= UINT_MAX
;
190 unsigned int max_rate
;
/* ULLONG_MAX marks "user did not pass -c"; otherwise take the period. */
192 if (opts
->user_interval
!= ULLONG_MAX
)
193 opts
->default_interval
= opts
->user_interval
;
195 opts
->freq
= opts
->user_freq
;
198 * User specified count overrides default frequency.
200 if (opts
->default_interval
)
202 else if (opts
->freq
) {
203 opts
->default_interval
= opts
->freq
;
/* Neither a period nor a frequency available: fatal. */
205 pr_err("frequency and count are zero, aborting\n");
/* If the sysctl cannot be read, skip the clamping below. */
209 if (get_max_rate(&max_rate
))
213 * User specified frequency is over current maximum.
215 if (user_freq
&& (max_rate
< opts
->freq
)) {
216 pr_err("Maximum frequency rate (%u) reached.\n"
217 "Please use -F freq option with lower value or consider\n"
218 "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
224 * Default frequency is over current maximum.
226 if (max_rate
< opts
->freq
) {
/* Default freq is only warned about and silently lowered. */
227 pr_warning("Lowering default frequency rate to %u.\n"
228 "Please consider tweaking "
229 "/proc/sys/kernel/perf_event_max_sample_rate.\n",
231 opts
->freq
= max_rate
;
/*
 * Public entry point for validating/normalizing record options; currently
 * just the frequency/period configuration.
 * (Reconstructed: the extract dropped only the brace lines.)
 */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
/*
 * perf_evlist__can_select_event - check whether the event described by
 * 'str' can actually be opened, using a temporary evlist and a cpu taken
 * from 'evlist' (or from the online-cpu map when 'evlist' has no cpus).
 *
 * NOTE(review): line-sampled extract; declarations of err/fd/cpu/pid/ret,
 * error paths, close(fd), the EACCES retry body and the return are
 * missing — TODO recover from the full source.  Code lines below are
 * byte-identical.
 */
242 bool perf_evlist__can_select_event(struct perf_evlist
*evlist
, const char *str
)
/* Scratch evlist so 'evlist' itself is never modified. */
244 struct perf_evlist
*temp_evlist
;
245 struct perf_evsel
*evsel
;
250 temp_evlist
= perf_evlist__new();
254 err
= parse_events(temp_evlist
, str
, NULL
);
/* parse_events appends, so the parsed event is the last entry. */
258 evsel
= perf_evlist__last(temp_evlist
);
/* No usable cpus in the caller's evlist: pick one from the system map. */
260 if (!evlist
|| cpu_map__empty(evlist
->cpus
)) {
261 struct cpu_map
*cpus
= cpu_map__new(NULL
);
263 cpu
= cpus
? cpus
->map
[0] : 0;
266 cpu
= evlist
->cpus
->map
[0];
270 fd
= sys_perf_event_open(&evsel
->attr
, pid
, cpu
, -1,
271 perf_event_open_cloexec_flag());
/* EACCES with pid == -1: likely perf_event_paranoid; the (missing)
 * lines retry with a per-thread target. */
273 if (pid
== -1 && errno
== EACCES
) {
/* Always free the scratch evlist before returning. */
285 perf_evlist__delete(temp_evlist
);