#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "sort.h"
#include "util.h"
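
/*
 * Typical usage by a perf tool (illustrative sketch only; the real call
 * sites live in the builtin-* commands, and "ops" here stands for the
 * tool's own struct perf_event_ops):
 *
 *	session = perf_session__new("perf.data", O_RDONLY, force, false, &ops);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &ops);
 *	perf_session__delete(session);
 *
 * perf_session__open() validates and opens the data file backing a
 * session: it handles the stdin pipe case, checks ownership and size,
 * and reads the file header.
 */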
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}
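
/*
 * Compute how many trailing bytes perf record appends to non-sample
 * events when sample_id_all is set, so that the parser knows where an
 * event's real payload ends.
 */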
static void perf_session__id_header_size(struct perf_session *session)
{
	struct perf_sample *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}
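
/*
 * Allocate and initialize a session: set up the thread and machine
 * trees, the ordered-samples lists and the mmap window, then either
 * open an existing data file (O_RDONLY) or prepare kernel maps for
 * writing a new one (O_WRONLY).
 */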
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_event_ops *ops)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (ops && ops->ordering_requires_timestamps &&
	    ops->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		ops->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}
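
/*
 * Walk a raw callchain, honouring the PERF_CONTEXT_* markers that
 * switch the cpumode between kernel, user and hypervisor frames, and
 * append each resolved ip/map/symbol to the session's callchain cursor.
 */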
int perf_session__resolve_callchain(struct perf_session *self,
				    struct thread *thread,
				    struct ip_callchain *chain,
				    struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&self->callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&self->callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_stub(union perf_event *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(union perf_event *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);
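
/*
 * Fill every callback the tool did not provide with a stub, so that the
 * dispatch code below never has to check for NULL handlers.
 */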
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_sample_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = perf_event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);
}

static void perf_event__attr_swap(union perf_event *event)
{
	size_t size;

	event->attr.attr.type = bswap_32(event->attr.attr.type);
	event->attr.attr.size = bswap_32(event->attr.attr.size);
	event->attr.attr.config = bswap_64(event->attr.attr.config);
	event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period);
	event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type);
	event->attr.attr.read_format = bswap_64(event->attr.attr.read_format);
	event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events);
	event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type);
	event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr);
	event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
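
/*
 * Events queued for timestamp-ordered delivery. Entries are allocated
 * in MAX_SAMPLE_BUFFER-entry chunks, recycled through sample_cache once
 * flushed, and freed in bulk via the to_free list.
 */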
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);
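
/*
 * Deliver every queued event with a timestamp up to next_flush, in
 * timestamp order, then recycle the queue entries into sample_cache.
 */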
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	int ret;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, ops,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(union perf_event *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))
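
/*
 * Queue one event for ordered delivery, keyed by its timestamp.
 * Returns -ETIME when the event carries no usable timestamp, in which
 * case the caller delivers it immediately instead.
 */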
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}
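
/*
 * Dispatch one kernel-generated event to the tool callback that matches
 * its type. Samples are additionally mapped to their evsel via the
 * sample id before being handed to ops->sample().
 */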
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	struct perf_evsel *evsel;

	dump_event(session, event, file_offset, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		evsel = perf_evlist__id2evsel(session->evlist, sample->id);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return -1;
		}
		return ops->sample(event, sample, evsel, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_event_ops *ops, u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}
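
/*
 * Entry point for a single event: byte-swap it if the file comes from a
 * host with the opposite endianness, hand synthetic (user) events to
 * perf_session__process_user_event(), and either queue or immediately
 * deliver kernel events depending on ops->ordered_samples.
 */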
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
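
/*
 * Summarize, once processing is done, anything that went wrong: lost
 * events, unknown event types, samples with unknown ids and invalid
 * callchains.
 */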
static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_event_ops *ops)
{
	if (ops->lost == perf_event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
			    "!\n\nCheck IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;
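
/*
 * Read events from a pipe (self->fd_pipe, i.e. the data file is "-"):
 * each event header is read and byte-swapped if needed, then the
 * payload is pulled in before the event is processed in place.
 */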
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, ops);
	perf_session_free_sample_buffers(self);
	return err;
}
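
/*
 * Return a pointer to the next event inside the current mmap window, or
 * NULL when the event header or payload would cross the window boundary
 * and a remap is required.
 */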
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}
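
/*
 * Process an on-disk perf.data file by mmaping it in mmap_window-sized
 * slices, remapping whenever the next event would cross the end of the
 * current slice, and doing a final flush of the ordered-samples queue
 * at the end.
 */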
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);
	perf_session__warn_about_errors(session, ops);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}
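
/*
 * Print the resolved address (and optionally symbol and DSO names) for
 * a sample, walking the resolved callchain when callchains are in use.
 */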
void perf_session__print_ip(union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_session *session,
			    int print_sym, int print_dso)
{
	struct addr_location al;
	const char *symname, *dsoname;
	struct callchain_cursor *cursor = &session->callchain_cursor;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, session, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (perf_session__resolve_callchain(session, al.thread,
						    sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(cursor);

		while (1) {
			node = callchain_cursor_current(cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				if (node->sym && node->sym->name)
					symname = node->sym->name;
				else
					symname = "";

				printf(" %s", symname);
			}
			if (print_dso) {
				if (node->map && node->map->dso && node->map->dso->name)
					dsoname = node->map->dso->name;
				else
					dsoname = "";

				printf(" (%s)", dsoname);
			}
			printf("\n");

			callchain_cursor_advance(cursor);
		}

	} else {
		printf("%16" PRIx64, al.addr);
		if (print_sym) {
			if (al.sym && al.sym->name)
				symname = al.sym->name;
			else
				symname = "";

			printf(" %s", symname);
		}

		if (print_dso) {
			if (al.map && al.map->dso && al.map->dso->name)
				dsoname = al.map->dso->name;
			else
				dsoname = "";

			printf(" (%s)", dsoname);
		}
	}
}