perf lock: Clean up various details
tools/perf/util/session.c

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

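/*
 * Open the file backing this session for reading: check that it is owned
 * by the current user (unless forced), that it is not empty and that its
 * header is in a format we understand, then remember its size.
 */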
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(&self->header, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

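/*
 * Allocate and set up a perf_session. In O_RDONLY mode the data file is
 * opened and its header read; in O_WRONLY mode the kernel maps are created
 * up front instead of when the kernel MMAP event is processed.
 */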
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	map_groups__init(&self->kmaps);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	self->sample_type = perf_header__sample_type(&self->header);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

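/*
 * Resolve each ip in a sampled callchain to a symbol. PERF_CONTEXT_*
 * markers in the chain switch the cpumode used for the entries that
 * follow. When sorting by parent, the first symbol matching parent_regex
 * is returned via *parent; the full symbol array is only built when
 * callchain recording is enabled.
 */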
struct symbol **perf_session__resolve_callchain(struct perf_session *self,
						struct thread *thread,
						struct ip_callchain *chain,
						struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct symbol **syms = NULL;
	unsigned int i;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i] = al.sym;
		}
	}

	return syms;
}

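/*
 * Every callback in struct perf_event_ops that the tool did not provide is
 * pointed at this stub, so that perf_session__process_event() can invoke
 * the handlers unconditionally.
 */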
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
}

static const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_MAX];

void event__print_totals(void)
{
	int i;
	for (i = 0; i < PERF_RECORD_MAX; ++i)
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
}

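/*
 * Byte-swapping support for perf.data files recorded on a machine with the
 * opposite endianness: mem_bswap_64() swaps a block of u64s, and the
 * event__swap_ops table below provides a per-record-type swap routine.
 */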
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX]    = NULL,
};

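/*
 * Central dispatcher: dump and count the event, byte-swap it if the file
 * was recorded with a different endianness, then hand it to the matching
 * perf_event_ops callback. Unknown record types are counted in
 * self->unknown_events and reported as an error to the caller.
 */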
static int perf_session__process_event(struct perf_session *self,
					event_t *event,
					struct perf_event_ops *ops,
					u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->sample(event, self);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

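/*
 * Walk the build-id section of the header: read each build_id_event plus
 * its filename, byte-swapping the event header when needed, and attach the
 * build-id to the corresponding DSO in the kernel or user DSO list.
 */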
int perf_header__read_build_ids(struct perf_header *self,
				int input, u64 offset, u64 size)
{
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		struct dso *dso;
		ssize_t len;
		struct list_head *head = &dsos__user;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (self->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
			head = &dsos__kernel;

		dso = __dsos__findnew(head, filename);
		if (dso != NULL) {
			dso__set_build_id(dso, &bev.build_id);
			if (head == &dsos__kernel && filename[0] == '[')
				dso->kernel = 1;
		}

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

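/* Register pid 0 as the "swapper" (idle) thread for this session. */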
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

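/*
 * Main event loop: mmap the data file a window at a time (mmap_window pages,
 * read-only unless the file needs byte-swapping, in which case a private
 * writable mapping is used), walk the events and dispatch each one through
 * perf_session__process_event(). When an event would cross the end of the
 * window, the window is slid forward and the file remapped. If an event
 * cannot be parsed, head is realigned to 8 bytes and advanced one u64 at a
 * time in the hope of resynchronizing with the stream.
 */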
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = self->header.data_offset;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= self->header.data_offset + self->header.data_size)
		goto done;

	if (offset + head < self->size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}

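/*
 * Trace-based tools need raw samples; if PERF_SAMPLE_RAW is missing from
 * the session's sample_type, complain and name the 'perf %s' record
 * command the user should have run.
 */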
bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

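/*
 * Remember the kallsyms reference symbol used to detect kernel relocation:
 * the name is duplicated (truncated at the first ']', if any) and its
 * address stored in self->ref_reloc_sym.
 */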
int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;

	self->ref_reloc_sym.name = strdup(symbol_name);
	if (self->ref_reloc_sym.name == NULL)
		return -ENOMEM;

	bracket = strchr(self->ref_reloc_sym.name, ']');
	if (bracket)
		*bracket = '\0';

	self->ref_reloc_sym.addr = addr;
	return 0;
}

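/*
 * Once the run-time address of the reference symbol is known, the vmlinux
 * maps are switched to these map_ip/unmap_ip helpers, which apply the
 * relocation offset stored in map->pgoff.
 */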
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void perf_session__reloc_vmlinux_maps(struct perf_session *self,
				      u64 unrelocated_addr)
{
	enum map_type type;
	s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr;

	if (!reloc)
		return;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct map *map = self->vmlinux_maps[type];

		map->map_ip = map__reloc_map_ip;
		map->unmap_ip = map__reloc_unmap_ip;
		map->pgoff = reloc;
	}
}