perf tools: Use O_LARGEFILE to open perf data file
tools/perf/util/session.c
#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

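/*
 * Open the perf.data file. O_LARGEFILE lets a 32-bit perf binary open
 * data files larger than 2GB. Unless 'force' is set, refuse files that
 * are owned by neither the current user nor root, skip empty files, and
 * bail out if the header cannot be read.
 */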
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	self->fd = open(self->filename, O_RDONLY|O_LARGEFILE);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(&self->header, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

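/*
 * Allocate and initialize a session. For O_RDONLY the data file is
 * opened and validated right away; for O_WRONLY the kernel maps are
 * created up front (in O_RDONLY mode that happens while processing the
 * kernel MMAP event instead).
 */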
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	map_groups__init(&self->kmaps);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	self->sample_type = perf_header__sample_type(&self->header);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

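/*
 * Resolve every ip in a recorded callchain to a symbol. PERF_CONTEXT_*
 * markers embedded in the chain switch the cpumode used for the lookups,
 * and the first symbol matching the parent regex is reported via *parent.
 * The symbol array is only allocated when callchains were requested.
 */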
struct symbol **perf_session__resolve_callchain(struct perf_session *self,
						struct thread *thread,
						struct ip_callchain *chain,
						struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct symbol **syms = NULL;
	unsigned int i;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL; break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER; break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i] = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

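/*
 * Fill in a stub for every handler the tool did not provide, so the
 * event dispatch below never has to check for NULL callbacks.
 */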
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
}

static const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_MAX];

void event__print_totals(void)
{
	int i;
	for (i = 0; i < PERF_RECORD_MAX; ++i)
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
}

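/*
 * Byte-swapping helpers for perf.data files recorded on a machine with
 * the opposite endianness: a whole-body 64-bit swap for events made up
 * only of u64 fields, plus per-field swappers where the layout mixes
 * 32- and 64-bit values.
 */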
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP] = event__mmap_swap,
	[PERF_RECORD_COMM] = event__comm_swap,
	[PERF_RECORD_FORK] = event__task_swap,
	[PERF_RECORD_EXIT] = event__task_swap,
	[PERF_RECORD_LOST] = event__all64_swap,
	[PERF_RECORD_READ] = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX] = NULL,
};

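/*
 * Dispatch one event: update the totals, byte-swap the body if the file
 * was recorded with the opposite endianness, then hand it to the
 * matching callback in 'ops'. Unknown types are counted and rejected.
 */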
static int perf_session__process_event(struct perf_session *self,
					event_t *event,
					struct perf_event_ops *ops,
					u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->sample(event, self);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

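/*
 * Read the build-id records stored in the header: each entry carries a
 * build-id plus the file name it belongs to, which is attached to the
 * corresponding DSO (kernel or user, depending on the misc bits).
 */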
int perf_header__read_build_ids(struct perf_header *self,
				int input, u64 offset, u64 size)
{
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		struct dso *dso;
		ssize_t len;
		struct list_head *head = &dsos__user;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (self->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
			head = &dsos__kernel;

		dso = __dsos__findnew(head, filename);
		if (dso != NULL) {
			dso__set_build_id(dso, &bev.build_id);
			if (head == &dsos__kernel && filename[0] == '[')
				dso->kernel = 1;
		}

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

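/*
 * Main event loop: mmap the data section a window at a time (remapping
 * at a page-aligned offset whenever the next event would cross the end
 * of the window), byte-swap headers when needed, and feed each event to
 * perf_session__process_event() until the data section is exhausted.
 */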
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = self->header.data_offset;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= self->header.data_offset + self->header.data_size)
		goto done;

	if (offset + head < self->size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

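/*
 * Remember the kallsyms reference symbol (name truncated at any ']')
 * and its address; perf_session__reloc_vmlinux_maps() below uses it to
 * compute the relocation offset for the vmlinux maps.
 */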
int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;

	self->ref_reloc_sym.name = strdup(symbol_name);
	if (self->ref_reloc_sym.name == NULL)
		return -ENOMEM;

	bracket = strchr(self->ref_reloc_sym.name, ']');
	if (bracket)
		*bracket = '\0';

	self->ref_reloc_sym.addr = addr;
	return 0;
}

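/*
 * Relocate the vmlinux maps by the difference between the reference
 * symbol's unrelocated address and the address recorded earlier:
 * map_ip()/unmap_ip() then add/subtract the offset stored in pgoff.
 */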
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void perf_session__reloc_vmlinux_maps(struct perf_session *self,
				      u64 unrelocated_addr)
{
	enum map_type type;
	s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr;

	if (!reloc)
		return;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct map *map = self->vmlinux_maps[type];

		map->map_ip = map__reloc_map_ip;
		map->unmap_ip = map__reloc_unmap_ip;
		map->pgoff = reloc;
	}
}