Commit | Line | Data |
---|---|---|
f8a95309 ACM |
1 | /* |
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | |
3 | * | |
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | |
5 | * copyright notes. | |
6 | * | |
7 | * Released under the GPL v2. (and only v2, not any later version) | |
8 | */ | |
9 | ||
936be503 | 10 | #include <byteswap.h> |
0f6a3015 | 11 | #include <linux/bitops.h> |
936be503 | 12 | #include "asm/bug.h" |
69aad6f1 | 13 | #include "evsel.h" |
70082dd9 | 14 | #include "evlist.h" |
69aad6f1 | 15 | #include "util.h" |
86bd5e86 | 16 | #include "cpumap.h" |
fd78260b | 17 | #include "thread_map.h" |
12864b31 | 18 | #include "target.h" |
287e74aa | 19 | #include "../../../include/linux/hw_breakpoint.h" |
26d33022 JO |
20 | #include "../../include/linux/perf_event.h" |
21 | #include "perf_regs.h" | |
69aad6f1 | 22 | |
c52b12ed ACM |
/* Access the per-(cpu, thread) file descriptor slot of evsel 'e'. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
24 | ||
bde09467 | 25 | static int __perf_evsel__sample_size(u64 sample_type) |
c2a70653 ACM |
26 | { |
27 | u64 mask = sample_type & PERF_SAMPLE_MASK; | |
28 | int size = 0; | |
29 | int i; | |
30 | ||
31 | for (i = 0; i < 64; i++) { | |
32 | if (mask & (1ULL << i)) | |
33 | size++; | |
34 | } | |
35 | ||
36 | size *= sizeof(u64); | |
37 | ||
38 | return size; | |
39 | } | |
40 | ||
4bf9ce1b | 41 | void hists__init(struct hists *hists) |
0e2a5f10 ACM |
42 | { |
43 | memset(hists, 0, sizeof(*hists)); | |
44 | hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT; | |
45 | hists->entries_in = &hists->entries_in_array[0]; | |
46 | hists->entries_collapsed = RB_ROOT; | |
47 | hists->entries = RB_ROOT; | |
48 | pthread_mutex_init(&hists->lock, NULL); | |
49 | } | |
50 | ||
ef1d1af2 ACM |
51 | void perf_evsel__init(struct perf_evsel *evsel, |
52 | struct perf_event_attr *attr, int idx) | |
53 | { | |
54 | evsel->idx = idx; | |
55 | evsel->attr = *attr; | |
56 | INIT_LIST_HEAD(&evsel->node); | |
1980c2eb | 57 | hists__init(&evsel->hists); |
bde09467 | 58 | evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); |
ef1d1af2 ACM |
59 | } |
60 | ||
23a2f3ab | 61 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) |
69aad6f1 ACM |
62 | { |
63 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | |
64 | ||
ef1d1af2 ACM |
65 | if (evsel != NULL) |
66 | perf_evsel__init(evsel, attr, idx); | |
69aad6f1 ACM |
67 | |
68 | return evsel; | |
69 | } | |
70 | ||
c410431c ACM |
/*
 * Canonical names for PERF_TYPE_HARDWARE events, indexed by attr.config
 * (the PERF_COUNT_HW_* enum values).
 */
static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};
83 | ||
dd4f5223 | 84 | static const char *__perf_evsel__hw_name(u64 config) |
c410431c ACM |
85 | { |
86 | if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config]) | |
87 | return perf_evsel__hw_names[config]; | |
88 | ||
89 | return "unknown-hardware"; | |
90 | } | |
91 | ||
/*
 * Append the modifier suffix (e.g. ":ukhGHppp") matching the evsel's
 * exclude_* and precise_ip attr bits to 'bf'. Returns the number of
 * characters written. The ':' separator slot is reserved when the first
 * modifier is printed ('colon' records its 1-based position) and only
 * patched in at the end if any modifier was emitted.
 */
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

	/* Print 'mod' when the corresponding exclude bit is clear,
	 * reserving the ':' slot the first time anything is printed. */
#define MOD_PRINT(context, mod) do { \
	if (!attr->exclude_##context) { \
		if (!colon) colon = ++r; \
		r += scnprintf(bf + r, size - r, "%c", mod); \
	} } while(0)

	/* Only print u/k/h when at least one of them is excluded. */
	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	/* precise_ip is 0..3; print that many 'p' characters. */
	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
127 | ||
27f18617 ACM |
128 | static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size) |
129 | { | |
130 | int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config)); | |
131 | return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); | |
132 | } | |
133 | ||
335c2f5d ACM |
/*
 * Canonical names for PERF_TYPE_SOFTWARE events, indexed by attr.config.
 * Slots left NULL (configs beyond the listed ones) fall back to
 * "unknown-software" in __perf_evsel__sw_name().
 */
static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};
145 | ||
dd4f5223 | 146 | static const char *__perf_evsel__sw_name(u64 config) |
335c2f5d ACM |
147 | { |
148 | if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config]) | |
149 | return perf_evsel__sw_names[config]; | |
150 | return "unknown-software"; | |
151 | } | |
152 | ||
153 | static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size) | |
154 | { | |
155 | int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config)); | |
156 | return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); | |
157 | } | |
158 | ||
287e74aa JO |
159 | static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type) |
160 | { | |
161 | int r; | |
162 | ||
163 | r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr); | |
164 | ||
165 | if (type & HW_BREAKPOINT_R) | |
166 | r += scnprintf(bf + r, size - r, "r"); | |
167 | ||
168 | if (type & HW_BREAKPOINT_W) | |
169 | r += scnprintf(bf + r, size - r, "w"); | |
170 | ||
171 | if (type & HW_BREAKPOINT_X) | |
172 | r += scnprintf(bf + r, size - r, "x"); | |
173 | ||
174 | return r; | |
175 | } | |
176 | ||
177 | static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size) | |
178 | { | |
179 | struct perf_event_attr *attr = &evsel->attr; | |
180 | int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type); | |
181 | return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); | |
182 | } | |
183 | ||
0b668bc9 ACM |
/*
 * Name/alias tables for PERF_TYPE_HW_CACHE events, indexed by the kernel
 * PERF_COUNT_HW_CACHE_* enums. Column 0 is the canonical spelling used
 * when printing; the remaining columns are alternate spellings.
 */
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

/* Cache operation names; column 1 is the plural form used when no
 * result qualifier is printed (see __perf_evsel__hw_cache_type_op_res_name). */
const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

/* Cache access result names (hit/miss dimension). */
const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
207 | ||
/* Shorthand for the PERF_COUNT_HW_CACHE_* enum values. */
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat: bitmask of the operations each cache type
 * supports.
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
228 | ||
229 | bool perf_evsel__is_cache_op_valid(u8 type, u8 op) | |
230 | { | |
231 | if (perf_evsel__hw_cache_stat[type] & COP(op)) | |
232 | return true; /* valid */ | |
233 | else | |
234 | return false; /* invalid */ | |
235 | } | |
236 | ||
237 | int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, | |
238 | char *bf, size_t size) | |
239 | { | |
240 | if (result) { | |
241 | return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0], | |
242 | perf_evsel__hw_cache_op[op][0], | |
243 | perf_evsel__hw_cache_result[result][0]); | |
244 | } | |
245 | ||
246 | return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0], | |
247 | perf_evsel__hw_cache_op[op][1]); | |
248 | } | |
249 | ||
dd4f5223 | 250 | static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size) |
0b668bc9 ACM |
251 | { |
252 | u8 op, result, type = (config >> 0) & 0xff; | |
253 | const char *err = "unknown-ext-hardware-cache-type"; | |
254 | ||
255 | if (type > PERF_COUNT_HW_CACHE_MAX) | |
256 | goto out_err; | |
257 | ||
258 | op = (config >> 8) & 0xff; | |
259 | err = "unknown-ext-hardware-cache-op"; | |
260 | if (op > PERF_COUNT_HW_CACHE_OP_MAX) | |
261 | goto out_err; | |
262 | ||
263 | result = (config >> 16) & 0xff; | |
264 | err = "unknown-ext-hardware-cache-result"; | |
265 | if (result > PERF_COUNT_HW_CACHE_RESULT_MAX) | |
266 | goto out_err; | |
267 | ||
268 | err = "invalid-cache"; | |
269 | if (!perf_evsel__is_cache_op_valid(type, op)) | |
270 | goto out_err; | |
271 | ||
272 | return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size); | |
273 | out_err: | |
274 | return scnprintf(bf, size, "%s", err); | |
275 | } | |
276 | ||
277 | static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size) | |
278 | { | |
279 | int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size); | |
280 | return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); | |
281 | } | |
282 | ||
6eef3d9c ACM |
283 | static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size) |
284 | { | |
285 | int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config); | |
286 | return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); | |
287 | } | |
288 | ||
/*
 * Return the evsel's display name, synthesizing and caching it from the
 * attr type/config on first call. The cached string is owned by the
 * evsel (freed in perf_evsel__delete()); on strdup() failure "unknown"
 * is returned and the name is recomputed on the next call.
 */
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	/* Dispatch on the attr type to the matching formatter. */
	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		/* Tracepoint names come from elsewhere; nothing to derive here. */
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
330 | ||
5090c6ae NK |
/*
 * Fill in the perf_event_attr for this evsel from the record options:
 * which sample fields to capture, sampling frequency/period, inheritance
 * and enable-on-exec behavior. 'first' is the first evsel of the list,
 * used to decide which event of a group gets enable_on_exec.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit = !opts->no_inherit;
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING |
			    PERF_FORMAT_ID;

	/* IP and TID are always sampled. */
	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a 1 default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

		/* DWARF unwinding needs user regs and a user stack dump too. */
		if (opts->call_graph == CALLCHAIN_DWARF) {
			attr->sample_type |= PERF_SAMPLE_REGS_USER |
					     PERF_SAMPLE_STACK_USER;
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type |= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type |= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}

	/* no_delay: wake up on every event instead of on the watermark. */
	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	/*
	 * When no target is given the workload is forked by us, so let
	 * the counter start only when it exec()s the workload.
	 */
	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}
418 | ||
69aad6f1 ACM |
419 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) |
420 | { | |
4af4c955 | 421 | int cpu, thread; |
69aad6f1 | 422 | evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); |
4af4c955 DA |
423 | |
424 | if (evsel->fd) { | |
425 | for (cpu = 0; cpu < ncpus; cpu++) { | |
426 | for (thread = 0; thread < nthreads; thread++) { | |
427 | FD(evsel, cpu, thread) = -1; | |
428 | } | |
429 | } | |
430 | } | |
431 | ||
69aad6f1 ACM |
432 | return evsel->fd != NULL ? 0 : -ENOMEM; |
433 | } | |
434 | ||
70db7533 ACM |
435 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) |
436 | { | |
a91e5431 ACM |
437 | evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); |
438 | if (evsel->sample_id == NULL) | |
439 | return -ENOMEM; | |
440 | ||
441 | evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); | |
442 | if (evsel->id == NULL) { | |
443 | xyarray__delete(evsel->sample_id); | |
444 | evsel->sample_id = NULL; | |
445 | return -ENOMEM; | |
446 | } | |
447 | ||
448 | return 0; | |
70db7533 ACM |
449 | } |
450 | ||
c52b12ed ACM |
451 | int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) |
452 | { | |
453 | evsel->counts = zalloc((sizeof(*evsel->counts) + | |
454 | (ncpus * sizeof(struct perf_counts_values)))); | |
455 | return evsel->counts != NULL ? 0 : -ENOMEM; | |
456 | } | |
457 | ||
69aad6f1 ACM |
/* Free the fd array. Note: does NOT close the fds - use
 * perf_evsel__close_fd() first if they are still open. */
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
463 | ||
70db7533 ACM |
/* Free the sample_id array and the flat id table allocated by
 * perf_evsel__alloc_id(). */
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
471 | ||
c52b12ed ACM |
472 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) |
473 | { | |
474 | int cpu, thread; | |
475 | ||
476 | for (cpu = 0; cpu < ncpus; cpu++) | |
477 | for (thread = 0; thread < nthreads; ++thread) { | |
478 | close(FD(evsel, cpu, thread)); | |
479 | FD(evsel, cpu, thread) = -1; | |
480 | } | |
481 | } | |
482 | ||
/*
 * Release the evsel's internal allocations (fd array, sample ids) but
 * not the evsel itself - counterpart of perf_evsel__init().
 */
void perf_evsel__exit(struct perf_evsel *evsel)
{
	/* Caller must have unlinked the evsel from its evlist first. */
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
490 | ||
/* Fully destroy an evsel allocated with perf_evsel__new(). */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	free(evsel->name);
	free(evsel);
}
c52b12ed ACM |
499 | |
/*
 * Read the counter value for one (cpu, thread) into evsel->counts->cpu[cpu].
 * With 'scale' the kernel also reports enabled/running times (3 u64s
 * instead of 1) and the value is scaled up to compensate for counter
 * multiplexing. Returns 0, -EINVAL if the fd was never opened, -ENOMEM,
 * or -errno from the read.
 */
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	/* Lazily grow the counts array to cover this cpu index. */
	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			/* Extrapolate: val * (enabled / running), rounded. */
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
526 | ||
/*
 * Read and aggregate the counter over all (cpu, thread) fds into
 * evsel->counts->aggr, skipping fds that were never opened. With 'scale'
 * the aggregate is extrapolated for multiplexing; counts->scaled is set
 * to -1 (never ran), 1 (scaled) or 0 (exact). Returns 0 or -errno.
 */
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		/* Counter never ran: report zero and flag it. */
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			/* Extrapolate: val * (enabled / running), rounded. */
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
48290609 | 570 | |
6a4bb04c JO |
571 | static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) |
572 | { | |
573 | struct perf_evsel *leader = evsel->leader; | |
574 | int fd; | |
575 | ||
576 | if (!leader) | |
577 | return -1; | |
578 | ||
579 | /* | |
580 | * Leader must be already processed/open, | |
581 | * if not it's a bug. | |
582 | */ | |
583 | BUG_ON(!leader->fd); | |
584 | ||
585 | fd = FD(leader, cpu, thread); | |
586 | BUG_ON(fd == -1); | |
587 | ||
588 | return fd; | |
589 | } | |
590 | ||
/*
 * Open one perf event fd per (cpu, thread) combination via
 * sys_perf_event_open(). On any failure, every fd opened so far is
 * closed and -errno from the failing open (or -ENOMEM) is returned.
 */
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	/* With a cgroup the "pid" argument is the cgroup fd instead. */
	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}
		}
	}

	return 0;

out_close:
	/*
	 * Unwind: close the remaining fds of the current cpu row, then
	 * all fds of every earlier cpu row.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}
640 | ||
/*
 * Close all fds and release the fd array; a no-op when nothing was
 * ever opened.
 */
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	/* perf_evsel__free_fd() already NULLed it; kept for clarity. */
	evsel->fd = NULL;
}
650 | ||
0252208e ACM |
/*
 * Dummy single-entry maps holding -1, substituted when the caller passes
 * NULL cpus/threads to perf_evsel__open() and friends. The anonymous
 * wrapper structs provide the storage for the maps' trailing arrays.
 */
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr = 1,
	.threads = { -1, },
};
666 | ||
f08199d3 | 667 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
6a4bb04c | 668 | struct thread_map *threads) |
48290609 | 669 | { |
0252208e ACM |
670 | if (cpus == NULL) { |
671 | /* Work around old compiler warnings about strict aliasing */ | |
672 | cpus = &empty_cpu_map.map; | |
48290609 ACM |
673 | } |
674 | ||
0252208e ACM |
675 | if (threads == NULL) |
676 | threads = &empty_thread_map.map; | |
48290609 | 677 | |
6a4bb04c | 678 | return __perf_evsel__open(evsel, cpus, threads); |
48290609 ACM |
679 | } |
680 | ||
/* Open per-cpu only: uses the dummy thread map (pid -1). */
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}
48290609 | 686 | |
/* Open per-thread only: uses the dummy cpu map (cpu -1). */
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
70082dd9 | 692 | |
/*
 * Parse the sample_id_all trailer appended to non-sample records. The
 * fields are laid out in sample order at the END of the record, so we
 * start at the last u64 and walk backwards, consuming only the fields
 * present in 'type'. 'swapped' means the event came from a host of the
 * opposite endianness.
 */
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample,
				       bool swapped)
{
	const u64 *array = event->sample.array;
	union u64_swap u;

	/* Point at the last u64 of the record. */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
745 | ||
98e1da90 FW |
746 | static bool sample_overlap(const union perf_event *event, |
747 | const void *offset, u64 size) | |
748 | { | |
749 | const void *base = event; | |
750 | ||
751 | if (offset + size > base + event->header.size) | |
752 | return true; | |
753 | ||
754 | return false; | |
755 | } | |
756 | ||
/*
 * Decode a perf event record into 'data' according to the evsel's
 * sample_type. Fields are consumed strictly in the kernel's layout
 * order. Variable-length fields (callchain, raw, branch stack, user
 * regs/stack) are pointed at in place, NOT copied - 'data' borrows from
 * 'event'. For non-PERF_RECORD_SAMPLE records only the sample_id_all
 * trailer is parsed. Returns 0, -EFAULT if a field would overrun the
 * record, or -1 for unsupported layouts. 'swapped' means the record has
 * the opposite endianness of the host.
 */
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data, bool swapped)
{
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data, swapped);
	}

	array = event->sample.array;

	/* The fixed-size part alone must fit inside the record. */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* Validate the nr field before trusting it for the length. */
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		/* Layout: u32 size, then 'size' bytes of raw payload. */
		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

		if (avail) {
			data->user_regs.regs = (u64 *)array;
			/* One u64 per bit set in the requested regs mask. */
			array += hweight_long(regs_user);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		u64 size = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!size) {
			data->user_stack.size = 0;
		} else {
			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			/* Actual dumped size trails the stack data. */
			data->user_stack.size = *array;
		}
	}

	return 0;
}
74eec26f AV |
926 | |
/*
 * Write the fixed-size fields of 'sample' into 'event' in the kernel's
 * sample layout - the inverse of perf_evsel__parse_sample() for the
 * fields it covers (IP through PERIOD; no variable-length fields).
 * 'swapped' means the record must be emitted in the opposite endianness.
 */
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}