[deliverable/linux.git] / tools / perf / util / session.c
#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
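
/*
 * Typical usage (a minimal sketch, based only on the functions in this
 * file): open an on-disk perf.data file read-only, use it, then tear
 * the session down:
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session == NULL)
 *		return -1;
 *	...
 *	perf_session__delete(session);
 */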

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

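/*
 * Resolve each ip in a callchain to a map/symbol pair.  Entries at or
 * above PERF_CONTEXT_MAX are not addresses but markers that switch the
 * cpumode used to resolve the entries that follow them.
 */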
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip,
					   &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}
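
/*
 * Swap a buffer of u64s in place.  Note that this assumes byte_size is
 * a multiple of sizeof(u64); a trailing partial word would still be
 * swapped as a full u64.
 */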
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type = bswap_32(self->attr.attr.type);
	self->attr.attr.size = bswap_32(self->attr.attr.size);
	self->attr.attr.config = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP] = event__mmap_swap,
	[PERF_RECORD_COMM] = event__comm_swap,
	[PERF_RECORD_FORK] = event__task_swap,
	[PERF_RECORD_EXIT] = event__task_swap,
	[PERF_RECORD_LOST] = event__all64_swap,
	[PERF_RECORD_READ] = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}
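
/*
 * These helpers keep the queue sorted by timestamp.  __queue_sample_end()
 * scans backwards from the list tail; the _before/_after variants continue
 * from the last inserted entry, which is usually close to the right spot.
 */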
static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}

	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp very close
	 * to the last event inserted, unless we just switched to another
	 * event buffer.  A list sort anchored on the last inserted event
	 * is thus probably more efficient than an rbtree-based sort.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	queue_sample_event(event, &data, s);

	return 0;
}

static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
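
/*
 * Read exactly @size bytes, looping over short reads.  Returns the
 * number of bytes read, or the first error/EOF return value of read().
 */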
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}
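
/*
 * On-disk processing mmaps the file in windows of mmap_window pages.
 * Whenever an event would cross the end of the current window, the
 * offset is shifted down to a page boundary and the window is remapped
 * (see the remap: label below).
 */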
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}
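
/*
 * Typical usage (a minimal sketch; the handler names below are
 * hypothetical, tool-local functions, not part of this file): a builtin
 * fills in only the callbacks it cares about, lets the rest default to
 * stubs via perf_event_ops__fill_defaults(), and runs the session:
 *
 *	static struct perf_event_ops ops = {
 *		.sample		 = process_sample_event,
 *		.comm		 = process_comm_event,
 *		.ordered_samples = true,
 *	};
 *
 *	err = perf_session__process_events(session, &ops);
 */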

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}
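
/*
 * Attach a reference symbol (e.g. "_text") and its expected address to
 * each kernel map, so that symbol resolution can later compensate if
 * the running kernel turns out to be relocated.
 */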
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}