perf tools: Fix 64 bit integer format strings
[deliverable/linux.git] tools/perf/util/event.c
1 #include <linux/types.h>
#include <inttypes.h>	/* for the PRIu64/PRIx64 format macros used below; may already be pulled in indirectly */
2 #include "event.h"
3 #include "debug.h"
4 #include "session.h"
5 #include "sort.h"
6 #include "string.h"
7 #include "strlist.h"
8 #include "thread.h"
9
10 static const char *event__name[] = {
11 [0] = "TOTAL",
12 [PERF_RECORD_MMAP] = "MMAP",
13 [PERF_RECORD_LOST] = "LOST",
14 [PERF_RECORD_COMM] = "COMM",
15 [PERF_RECORD_EXIT] = "EXIT",
16 [PERF_RECORD_THROTTLE] = "THROTTLE",
17 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
18 [PERF_RECORD_FORK] = "FORK",
19 [PERF_RECORD_READ] = "READ",
20 [PERF_RECORD_SAMPLE] = "SAMPLE",
21 [PERF_RECORD_HEADER_ATTR] = "ATTR",
22 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
23 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
24 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
25 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
26 };
27
28 const char *event__get_event_name(unsigned int id)
29 {
30 if (id >= ARRAY_SIZE(event__name))
31 return "INVALID";
32 if (!event__name[id])
33 return "UNKNOWN";
34 return event__name[id];
35 }
36
37 static struct sample_data synth_sample = {
38 .pid = -1,
39 .tid = -1,
40 .time = -1,
41 .stream_id = -1,
42 .cpu = -1,
43 .period = 1,
44 };
45
46 static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
47 event__handler_t process,
48 struct perf_session *session)
49 {
50 char filename[PATH_MAX];
51 char bf[BUFSIZ];
52 FILE *fp;
53 size_t size = 0;
54 DIR *tasks;
55 struct dirent dirent, *next;
56 pid_t tgid = 0;
57
58 snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
59
60 fp = fopen(filename, "r");
61 if (fp == NULL) {
62 out_race:
63 /*
64 * We raced with a task exiting - just return:
65 */
66 pr_debug("couldn't open %s\n", filename);
67 return 0;
68 }
69
70 memset(&event->comm, 0, sizeof(event->comm));
71
72 while (!event->comm.comm[0] || !event->comm.pid) {
73 if (fgets(bf, sizeof(bf), fp) == NULL) {
74 pr_warning("couldn't get COMM and tgid, malformed %s\n", filename);
75 goto out;
76 }
77
78 if (memcmp(bf, "Name:", 5) == 0) {
79 char *name = bf + 5;
80 while (*name && isspace(*name))
81 ++name;
82 size = strlen(name) - 1;
83 memcpy(event->comm.comm, name, size++);
84 } else if (memcmp(bf, "Tgid:", 5) == 0) {
85 char *tgids = bf + 5;
86 while (*tgids && isspace(*tgids))
87 ++tgids;
88 tgid = event->comm.pid = atoi(tgids);
89 }
90 }
91
92 event->comm.header.type = PERF_RECORD_COMM;
93 size = ALIGN(size, sizeof(u64));
94 memset(event->comm.comm + size, 0, session->id_hdr_size);
95 event->comm.header.size = (sizeof(event->comm) -
96 (sizeof(event->comm.comm) - size) +
97 session->id_hdr_size);
98 if (!full) {
99 event->comm.tid = pid;
100
101 process(event, &synth_sample, session);
102 goto out;
103 }
104
105 snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
106
107 tasks = opendir(filename);
108 if (tasks == NULL)
109 goto out_race;
110
111 while (!readdir_r(tasks, &dirent, &next) && next) {
112 char *end;
113 pid = strtol(dirent.d_name, &end, 10);
114 if (*end)
115 continue;
116
117 event->comm.tid = pid;
118
119 process(event, &synth_sample, session);
120 }
121
122 closedir(tasks);
123 out:
124 fclose(fp);
125
126 return tgid;
127 }
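
For reference, the loop above only cares about two lines of /proc/<pid>/status: "Name:" (the comm) and "Tgid:" (the thread group leader's pid). Below is a standalone sketch of the same parse, reading the current process's own status file; it is an illustration, not perf code, and the buffer sizes are arbitrary.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char bf[BUFSIZ], name[64] = "";
	int tgid = -1;
	FILE *fp = fopen("/proc/self/status", "r");

	if (fp == NULL)
		return 1;

	while (fgets(bf, sizeof(bf), fp)) {
		if (!strncmp(bf, "Name:", 5)) {
			char *p = bf + 5;

			while (*p && isspace((unsigned char)*p))
				++p;
			p[strcspn(p, "\n")] = '\0';	/* strip the trailing newline */
			snprintf(name, sizeof(name), "%s", p);
		} else if (!strncmp(bf, "Tgid:", 5)) {
			tgid = atoi(bf + 5);		/* atoi skips the leading whitespace */
		}
	}
	fclose(fp);

	printf("comm=%s tgid=%d\n", name, tgid);
	return 0;
}
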
128
129 static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
130 event__handler_t process,
131 struct perf_session *session)
132 {
133 char filename[PATH_MAX];
134 FILE *fp;
135
136 snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
137
138 fp = fopen(filename, "r");
139 if (fp == NULL) {
140 /*
141 * We raced with a task exiting - just return:
142 */
143 pr_debug("couldn't open %s\n", filename);
144 return -1;
145 }
146
147 event->header.type = PERF_RECORD_MMAP;
148 /*
149 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
150 */
151 event->header.misc = PERF_RECORD_MISC_USER;
152
153 while (1) {
154 char bf[BUFSIZ], *pbf = bf;
155 int n;
156 size_t size;
157 if (fgets(bf, sizeof(bf), fp) == NULL)
158 break;
159
160 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
161 n = hex2u64(pbf, &event->mmap.start);
162 if (n < 0)
163 continue;
164 pbf += n + 1;
165 n = hex2u64(pbf, &event->mmap.len);
166 if (n < 0)
167 continue;
168 pbf += n + 3;
169 if (*pbf == 'x') { /* vm_exec */
170 char *execname = strchr(bf, '/');
171
172 /* Catch VDSO */
173 if (execname == NULL)
174 execname = strstr(bf, "[vdso]");
175
176 if (execname == NULL)
177 continue;
178
179 pbf += 3;
180 n = hex2u64(pbf, &event->mmap.pgoff);
181
182 size = strlen(execname);
183 execname[size - 1] = '\0'; /* Remove \n */
184 memcpy(event->mmap.filename, execname, size);
185 size = ALIGN(size, sizeof(u64));
186 event->mmap.len -= event->mmap.start;
187 event->mmap.header.size = (sizeof(event->mmap) -
188 (sizeof(event->mmap.filename) - size));
189 memset(event->mmap.filename + size, 0, session->id_hdr_size);
190 event->mmap.header.size += session->id_hdr_size;
191 event->mmap.pid = tgid;
192 event->mmap.tid = pid;
193
194 process(event, &synth_sample, session);
195 }
196 }
197
198 fclose(fp);
199 return 0;
200 }
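
Here is a standalone sketch (not perf code) of parsing one /proc/<pid>/maps line in the format shown in the comment above, using sscanf() instead of the hex2u64() helper and keeping only executable file-backed mappings and [vdso], as the loop does; the function name and buffer sizes are illustrative.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

static int parse_maps_line(const char *line)
{
	uint64_t start, end, pgoff;
	char perms[5], path[4096] = "";

	/* the trailing path is optional: anonymous mappings have none */
	if (sscanf(line, "%" SCNx64 "-%" SCNx64 " %4s %" SCNx64 " %*x:%*x %*u %4095[^\n]",
		   &start, &end, perms, &pgoff, path) < 4)
		return -1;

	if (strchr(perms, 'x') == NULL)
		return 0;	/* not executable, skipped just like above */
	if (path[0] != '/' && strcmp(path, "[vdso]") != 0)
		return 0;	/* anonymous executable mapping, also skipped */

	printf("start=%#" PRIx64 " len=%#" PRIx64 " pgoff=%#" PRIx64 " file=%s\n",
	       start, end - start, pgoff, path);
	return 1;
}

int main(void)
{
	return parse_maps_line("00400000-0040c000 r-xp 00000000 fd:01 41038    /bin/cat\n") == 1 ? 0 : 1;
}
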
201
202 int event__synthesize_modules(event__handler_t process,
203 struct perf_session *session,
204 struct machine *machine)
205 {
206 struct rb_node *nd;
207 struct map_groups *kmaps = &machine->kmaps;
208 event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
209
210 if (event == NULL) {
211 pr_debug("Not enough memory synthesizing mmap event "
212 "for kernel modules\n");
213 return -1;
214 }
215
216 event->header.type = PERF_RECORD_MMAP;
217
218 /*
219 * kernel uses 0 for user space maps, see kernel/perf_event.c
220 * __perf_event_mmap
221 */
222 if (machine__is_host(machine))
223 event->header.misc = PERF_RECORD_MISC_KERNEL;
224 else
225 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
226
227 for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
228 nd; nd = rb_next(nd)) {
229 size_t size;
230 struct map *pos = rb_entry(nd, struct map, rb_node);
231
232 if (pos->dso->kernel)
233 continue;
234
235 size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
236 event->mmap.header.type = PERF_RECORD_MMAP;
237 event->mmap.header.size = (sizeof(event->mmap) -
238 (sizeof(event->mmap.filename) - size));
239 memset(event->mmap.filename + size, 0, session->id_hdr_size);
240 event->mmap.header.size += session->id_hdr_size;
241 event->mmap.start = pos->start;
242 event->mmap.len = pos->end - pos->start;
243 event->mmap.pid = machine->pid;
244
245 memcpy(event->mmap.filename, pos->dso->long_name,
246 pos->dso->long_name_len + 1);
247 process(event, &synth_sample, session);
248 }
249
250 free(event);
251 return 0;
252 }
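
The header.size arithmetic used here and in event__synthesize_comm()/event__synthesize_mmap_events() follows one pattern: take the struct size, give back the unused tail of the fixed filename (or comm) buffer beyond the u64-aligned string length, then add the per-session id header. A minimal standalone sketch of that calculation follows; the struct layout, sizes and id_hdr_size value are hypothetical, not perf's real event_t.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct mmap_sketch {				/* stand-in for perf's mmap event */
	uint64_t start, len, pgoff;
	uint32_t pid, tid;
	char filename[4096];
};

int main(void)
{
	const char *path = "/bin/cat";
	size_t size = ALIGN(strlen(path) + 1, sizeof(uint64_t));	/* 9 -> 16 */
	size_t id_hdr_size = 0;			/* assumed: no sample_id_all trailer */
	size_t record_size = sizeof(struct mmap_sketch) -
			     (sizeof(((struct mmap_sketch *)0)->filename) - size) +
			     id_hdr_size;

	/* only `size` of the 4096 filename bytes are kept in the emitted record */
	printf("filename bytes kept: %zu, record size: %zu\n", size, record_size);
	return 0;
}
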
253
254 static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
255 pid_t pid, event__handler_t process,
256 struct perf_session *session)
257 {
258 pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
259 session);
260 if (tgid == -1)
261 return -1;
262 return event__synthesize_mmap_events(mmap_event, pid, tgid,
263 process, session);
264 }
265
266 int event__synthesize_thread(pid_t pid, event__handler_t process,
267 struct perf_session *session)
268 {
269 event_t *comm_event, *mmap_event;
270 int err = -1;
271
272 comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
273 if (comm_event == NULL)
274 goto out;
275
276 mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
277 if (mmap_event == NULL)
278 goto out_free_comm;
279
280 err = __event__synthesize_thread(comm_event, mmap_event, pid,
281 process, session);
282 free(mmap_event);
283 out_free_comm:
284 free(comm_event);
285 out:
286 return err;
287 }
288
289 int event__synthesize_threads(event__handler_t process,
290 struct perf_session *session)
291 {
292 DIR *proc;
293 struct dirent dirent, *next;
294 event_t *comm_event, *mmap_event;
295 int err = -1;
296
297 comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
298 if (comm_event == NULL)
299 goto out;
300
301 mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
302 if (mmap_event == NULL)
303 goto out_free_comm;
304
305 proc = opendir("/proc");
306 if (proc == NULL)
307 goto out_free_mmap;
308
309 while (!readdir_r(proc, &dirent, &next) && next) {
310 char *end;
311 pid_t pid = strtol(dirent.d_name, &end, 10);
312
313 if (*end) /* only interested in proper numerical dirents */
314 continue;
315
316 __event__synthesize_thread(comm_event, mmap_event, pid,
317 process, session);
318 }
319
320 closedir(proc);
321 err = 0;
322 out_free_mmap:
323 free(mmap_event);
324 out_free_comm:
325 free(comm_event);
326 out:
327 return err;
328 }
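
The /proc walk above keeps only directory entries whose names are pure numbers, i.e. pids. Below is a standalone sketch of the same filter, using plain readdir() (which current glibc recommends over the readdir_r() used here) rather than perf's infrastructure.

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	DIR *proc = opendir("/proc");
	struct dirent *ent;

	if (proc == NULL)
		return 1;

	while ((ent = readdir(proc)) != NULL) {
		char *end;
		long pid = strtol(ent->d_name, &end, 10);

		if (*end)	/* only proper numerical dirents are pids */
			continue;
		printf("%ld\n", pid);
	}
	closedir(proc);
	return 0;
}
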
329
330 struct process_symbol_args {
331 const char *name;
332 u64 start;
333 };
334
335 static int find_symbol_cb(void *arg, const char *name, char type,
336 u64 start, u64 end __used)
337 {
338 struct process_symbol_args *args = arg;
339
340 /*
341 * Must be a function or at least an alias, as on PARISC64, where "_text" is
342 * an 'A'-type alias for the same address as "_stext".
343 */
344 if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
345 type == 'A') || strcmp(name, args->name))
346 return 0;
347
348 args->start = start;
349 return 1;
350 }
351
352 int event__synthesize_kernel_mmap(event__handler_t process,
353 struct perf_session *session,
354 struct machine *machine,
355 const char *symbol_name)
356 {
357 size_t size;
358 const char *filename, *mmap_name;
359 char path[PATH_MAX];
360 char name_buff[PATH_MAX];
361 struct map *map;
362 int err;
363 /*
364 * We should get this from /sys/kernel/sections/.text, but until that is
365 * available use /proc/kallsyms, and once it is, keep kallsyms as a
366 * fallback for older kernels.
367 */
368 struct process_symbol_args args = { .name = symbol_name, };
369 event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
370
371 if (event == NULL) {
372 pr_debug("Not enough memory synthesizing mmap event "
373 "for the kernel\n");
374 return -1;
375 }
376
377 mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
378 if (machine__is_host(machine)) {
379 /*
380 * kernel uses PERF_RECORD_MISC_USER for user space maps,
381 * see kernel/perf_event.c __perf_event_mmap
382 */
383 event->header.misc = PERF_RECORD_MISC_KERNEL;
384 filename = "/proc/kallsyms";
385 } else {
386 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
387 if (machine__is_default_guest(machine))
388 filename = (char *) symbol_conf.default_guest_kallsyms;
389 else {
390 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
391 filename = path;
392 }
393 }
394
395 if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
396 return -ENOENT;
397
398 map = machine->vmlinux_maps[MAP__FUNCTION];
399 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
400 "%s%s", mmap_name, symbol_name) + 1;
401 size = ALIGN(size, sizeof(u64));
402 event->mmap.header.type = PERF_RECORD_MMAP;
403 event->mmap.header.size = (sizeof(event->mmap) -
404 (sizeof(event->mmap.filename) - size) + session->id_hdr_size);
405 event->mmap.pgoff = args.start;
406 event->mmap.start = map->start;
407 event->mmap.len = map->end - event->mmap.start;
408 event->mmap.pid = machine->pid;
409
410 err = process(event, &synth_sample, session);
411 free(event);
412
413 return err;
414 }
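
kallsyms__parse() above hands each /proc/kallsyms line ("<hex address> <type> <name> [module]") to find_symbol_cb(), which keeps only an exact name match of function type or an 'A' alias. Here is a standalone sketch (not perf code) of roughly the same lookup with plain stdio; accepting 't'/'T'/'A' is a simplification of what symbol_type__is_a() allows.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

static int kallsyms_lookup(const char *wanted, uint64_t *addr)
{
	char line[256], name[128], type;
	FILE *fp = fopen("/proc/kallsyms", "r");

	if (fp == NULL)
		return -1;

	while (fgets(line, sizeof(line), fp)) {
		if (sscanf(line, "%" SCNx64 " %c %127s", addr, &type, name) != 3)
			continue;
		if ((type == 't' || type == 'T' || type == 'A') &&
		    strcmp(name, wanted) == 0) {
			fclose(fp);
			return 0;
		}
	}
	fclose(fp);
	return -1;
}

int main(void)
{
	uint64_t addr;

	/* addresses read back as 0 unless kptr_restrict allows them */
	if (kallsyms_lookup("_text", &addr) == 0)
		printf("_text at %#" PRIx64 "\n", addr);
	return 0;
}
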
415
416 static void thread__comm_adjust(struct thread *self, struct hists *hists)
417 {
418 char *comm = self->comm;
419
420 if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
421 (!symbol_conf.comm_list ||
422 strlist__has_entry(symbol_conf.comm_list, comm))) {
423 u16 slen = strlen(comm);
424
425 if (hists__new_col_len(hists, HISTC_COMM, slen))
426 hists__set_col_len(hists, HISTC_THREAD, slen + 6);
427 }
428 }
429
430 static int thread__set_comm_adjust(struct thread *self, const char *comm,
431 struct hists *hists)
432 {
433 int ret = thread__set_comm(self, comm);
434
435 if (ret)
436 return ret;
437
438 thread__comm_adjust(self, hists);
439
440 return 0;
441 }
442
443 int event__process_comm(event_t *self, struct sample_data *sample __used,
444 struct perf_session *session)
445 {
446 struct thread *thread = perf_session__findnew(session, self->comm.tid);
447
448 dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
449
450 if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
451 &session->hists)) {
452 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
453 return -1;
454 }
455
456 return 0;
457 }
458
459 int event__process_lost(event_t *self, struct sample_data *sample __used,
460 struct perf_session *session)
461 {
462 dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
463 self->lost.id, self->lost.lost);
464 session->hists.stats.total_lost += self->lost.lost;
465 return 0;
466 }
467
468 static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
469 {
470 maps[MAP__FUNCTION]->start = self->mmap.start;
471 maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
472 /*
473 * Be a bit paranoid here: some perf.data files come with
474 * a zero-sized synthesized MMAP event for the kernel.
475 */
476 if (maps[MAP__FUNCTION]->end == 0)
477 maps[MAP__FUNCTION]->end = ~0ULL;
478 }
479
480 static int event__process_kernel_mmap(event_t *self,
481 struct perf_session *session)
482 {
483 struct map *map;
484 char kmmap_prefix[PATH_MAX];
485 struct machine *machine;
486 enum dso_kernel_type kernel_type;
487 bool is_kernel_mmap;
488
489 machine = perf_session__findnew_machine(session, self->mmap.pid);
490 if (!machine) {
491 pr_err("Can't find id %d's machine\n", self->mmap.pid);
492 goto out_problem;
493 }
494
495 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
496 if (machine__is_host(machine))
497 kernel_type = DSO_TYPE_KERNEL;
498 else
499 kernel_type = DSO_TYPE_GUEST_KERNEL;
500
501 is_kernel_mmap = memcmp(self->mmap.filename,
502 kmmap_prefix,
503 strlen(kmmap_prefix)) == 0;
504 if (self->mmap.filename[0] == '/' ||
505 (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
506
507 char short_module_name[1024];
508 char *name, *dot;
509
510 if (self->mmap.filename[0] == '/') {
511 name = strrchr(self->mmap.filename, '/');
512 if (name == NULL)
513 goto out_problem;
514
515 ++name; /* skip / */
516 dot = strrchr(name, '.');
517 if (dot == NULL)
518 goto out_problem;
519 snprintf(short_module_name, sizeof(short_module_name),
520 "[%.*s]", (int)(dot - name), name);
521 strxfrchar(short_module_name, '-', '_');
522 } else
523 strcpy(short_module_name, self->mmap.filename);
524
525 map = machine__new_module(machine, self->mmap.start,
526 self->mmap.filename);
527 if (map == NULL)
528 goto out_problem;
529
530 name = strdup(short_module_name);
531 if (name == NULL)
532 goto out_problem;
533
534 map->dso->short_name = name;
535 map->dso->sname_alloc = 1;
536 map->end = map->start + self->mmap.len;
537 } else if (is_kernel_mmap) {
538 const char *symbol_name = (self->mmap.filename +
539 strlen(kmmap_prefix));
540 /*
541 * Should be there already, from the build-id table in
542 * the header.
543 */
544 struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
545 kmmap_prefix);
546 if (kernel == NULL)
547 goto out_problem;
548
549 kernel->kernel = kernel_type;
550 if (__machine__create_kernel_maps(machine, kernel) < 0)
551 goto out_problem;
552
553 event_set_kernel_mmap_len(machine->vmlinux_maps, self);
554 perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
555 symbol_name,
556 self->mmap.pgoff);
557 if (machine__is_default_guest(machine)) {
558 /*
559 * preload dso of guest kernel and modules
560 */
561 dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
562 NULL);
563 }
564 }
565 return 0;
566 out_problem:
567 return -1;
568 }
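
The short module name built above turns a path like /lib/modules/.../snd-hda-intel.ko into "[snd_hda_intel]", with strxfrchar() mapping '-' to '_' so it matches the names the kernel reports. A standalone sketch of that transformation (illustrative only, not the perf helpers themselves):

#include <stdio.h>
#include <string.h>

static void module_short_name(const char *path, char *out, size_t outsz)
{
	const char *name = strrchr(path, '/');
	const char *dot;
	char *p;

	name = name ? name + 1 : path;
	dot = strrchr(name, '.');
	snprintf(out, outsz, "[%.*s]",
		 dot ? (int)(dot - name) : (int)strlen(name), name);
	for (p = out; *p; p++)		/* same effect as strxfrchar(out, '-', '_') */
		if (*p == '-')
			*p = '_';
}

int main(void)
{
	char short_name[64];

	module_short_name("/lib/modules/2.6.38/kernel/sound/pci/hda/snd-hda-intel.ko",
			  short_name, sizeof(short_name));
	printf("%s\n", short_name);	/* prints "[snd_hda_intel]" */
	return 0;
}
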
569
570 int event__process_mmap(event_t *self, struct sample_data *sample __used,
571 struct perf_session *session)
572 {
573 struct machine *machine;
574 struct thread *thread;
575 struct map *map;
576 u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
577 int ret = 0;
578
579 dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
580 self->mmap.pid, self->mmap.tid, self->mmap.start,
581 self->mmap.len, self->mmap.pgoff, self->mmap.filename);
582
583 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
584 cpumode == PERF_RECORD_MISC_KERNEL) {
585 ret = event__process_kernel_mmap(self, session);
586 if (ret < 0)
587 goto out_problem;
588 return 0;
589 }
590
591 machine = perf_session__find_host_machine(session);
592 if (machine == NULL)
593 goto out_problem;
594 thread = perf_session__findnew(session, self->mmap.pid);
595 if (thread == NULL)
596 goto out_problem;
597 map = map__new(&machine->user_dsos, self->mmap.start,
598 self->mmap.len, self->mmap.pgoff,
599 self->mmap.pid, self->mmap.filename,
600 MAP__FUNCTION);
601 if (map == NULL)
602 goto out_problem;
603
604 thread__insert_map(thread, map);
605 return 0;
606
607 out_problem:
608 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
609 return 0;
610 }
611
612 int event__process_task(event_t *self, struct sample_data *sample __used,
613 struct perf_session *session)
614 {
615 struct thread *thread = perf_session__findnew(session, self->fork.tid);
616 struct thread *parent = perf_session__findnew(session, self->fork.ptid);
617
618 dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
619 self->fork.ppid, self->fork.ptid);
620
621 if (self->header.type == PERF_RECORD_EXIT) {
622 perf_session__remove_thread(session, thread);
623 return 0;
624 }
625
626 if (thread == NULL || parent == NULL ||
627 thread__fork(thread, parent) < 0) {
628 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
629 return -1;
630 }
631
632 return 0;
633 }
634
635 int event__process(event_t *event, struct sample_data *sample,
636 struct perf_session *session)
637 {
638 switch (event->header.type) {
639 case PERF_RECORD_COMM:
640 event__process_comm(event, sample, session);
641 break;
642 case PERF_RECORD_MMAP:
643 event__process_mmap(event, sample, session);
644 break;
645 case PERF_RECORD_FORK:
646 case PERF_RECORD_EXIT:
647 event__process_task(event, sample, session);
648 break;
649 default:
650 break;
651 }
652
653 return 0;
654 }
655
656 void thread__find_addr_map(struct thread *self,
657 struct perf_session *session, u8 cpumode,
658 enum map_type type, pid_t pid, u64 addr,
659 struct addr_location *al)
660 {
661 struct map_groups *mg = &self->mg;
662 struct machine *machine = NULL;
663
664 al->thread = self;
665 al->addr = addr;
666 al->cpumode = cpumode;
667 al->filtered = false;
668
669 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
670 al->level = 'k';
671 machine = perf_session__find_host_machine(session);
672 if (machine == NULL) {
673 al->map = NULL;
674 return;
675 }
676 mg = &machine->kmaps;
677 } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
678 al->level = '.';
679 machine = perf_session__find_host_machine(session);
680 } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
681 al->level = 'g';
682 machine = perf_session__find_machine(session, pid);
683 if (machine == NULL) {
684 al->map = NULL;
685 return;
686 }
687 mg = &machine->kmaps;
688 } else {
689 /*
690 * 'u' means guest OS user space.
691 * TODO: We don't support guest user space yet. Might support it later.
692 */
693 if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
694 al->level = 'u';
695 else
696 al->level = 'H';
697 al->map = NULL;
698
699 if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
700 cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
701 !perf_guest)
702 al->filtered = true;
703 if ((cpumode == PERF_RECORD_MISC_USER ||
704 cpumode == PERF_RECORD_MISC_KERNEL) &&
705 !perf_host)
706 al->filtered = true;
707
708 return;
709 }
710 try_again:
711 al->map = map_groups__find(mg, type, al->addr);
712 if (al->map == NULL) {
713 /*
714 * If this is outside of all known maps, and is a negative
715 * address, try to look it up in the kernel dso, as it might be
716 * a vsyscall or vdso (which executes in user-mode).
717 *
718 * XXX This is nasty, we should have a symbol list in the
719 * "[vdso]" dso, but for now lets use the old trick of looking
720 * in the whole kernel symbol list.
721 */
722 if ((long long)al->addr < 0 &&
723 cpumode == PERF_RECORD_MISC_KERNEL &&
724 machine && mg != &machine->kmaps) {
725 mg = &machine->kmaps;
726 goto try_again;
727 }
728 } else
729 al->addr = al->map->map_ip(al->map, al->addr);
730 }
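
The level character picked above depends only on the cpumode bits in the event header and on whether host/guest samples are wanted. Below is a standalone sketch of that mapping, written against the numeric PERF_RECORD_MISC_* cpumode values from the perf ABI headers (KERNEL=1, USER=2, GUEST_KERNEL=4, GUEST_USER=5); the helper name is made up for illustration and the filtering logic is left out.

#include <stdio.h>

static char cpumode_level(unsigned char cpumode, int want_host, int want_guest)
{
	switch (cpumode) {
	case 1:	return want_host  ? 'k' : 'H';	/* PERF_RECORD_MISC_KERNEL */
	case 2:	return want_host  ? '.' : 'H';	/* PERF_RECORD_MISC_USER */
	case 4:	return want_guest ? 'g' : 'H';	/* PERF_RECORD_MISC_GUEST_KERNEL */
	case 5:	return want_guest ? 'u' : 'H';	/* PERF_RECORD_MISC_GUEST_USER */
	default: return 'H';			/* hypervisor or unknown */
	}
}

int main(void)
{
	/* host kernel sample -> 'k'; user sample with host disabled -> 'H' */
	printf("%c %c\n", cpumode_level(1, 1, 0), cpumode_level(2, 0, 0));
	return 0;
}
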
731
732 void thread__find_addr_location(struct thread *self,
733 struct perf_session *session, u8 cpumode,
734 enum map_type type, pid_t pid, u64 addr,
735 struct addr_location *al,
736 symbol_filter_t filter)
737 {
738 thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
739 if (al->map != NULL)
740 al->sym = map__find_symbol(al->map, al->addr, filter);
741 else
742 al->sym = NULL;
743 }
744
745 static void dso__calc_col_width(struct dso *self, struct hists *hists)
746 {
747 if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
748 (!symbol_conf.dso_list ||
749 strlist__has_entry(symbol_conf.dso_list, self->name))) {
750 u16 slen = dso__name_len(self);
751 hists__new_col_len(hists, HISTC_DSO, slen);
752 }
753
754 self->slen_calculated = 1;
755 }
756
757 int event__preprocess_sample(const event_t *self, struct perf_session *session,
758 struct addr_location *al, struct sample_data *data,
759 symbol_filter_t filter)
760 {
761 u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
762 struct thread *thread = perf_session__findnew(session, self->ip.pid);
763
764 if (thread == NULL)
765 return -1;
766
767 if (symbol_conf.comm_list &&
768 !strlist__has_entry(symbol_conf.comm_list, thread->comm))
769 goto out_filtered;
770
771 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
772 /*
773 * Have we already created the kernel maps for the host machine?
774 *
775 * This should have happened earlier, when we processed the kernel MMAP
776 * events, but for older perf.data files there was no such thing, so do
777 * it now.
778 */
779 if (cpumode == PERF_RECORD_MISC_KERNEL &&
780 session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
781 machine__create_kernel_maps(&session->host_machine);
782
783 thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
784 self->ip.pid, self->ip.ip, al);
785 dump_printf(" ...... dso: %s\n",
786 al->map ? al->map->dso->long_name :
787 al->level == 'H' ? "[hypervisor]" : "<not found>");
788 al->sym = NULL;
789 al->cpu = data->cpu;
790
791 if (al->map) {
792 if (symbol_conf.dso_list &&
793 (!al->map || !al->map->dso ||
794 !(strlist__has_entry(symbol_conf.dso_list,
795 al->map->dso->short_name) ||
796 (al->map->dso->short_name != al->map->dso->long_name &&
797 strlist__has_entry(symbol_conf.dso_list,
798 al->map->dso->long_name)))))
799 goto out_filtered;
800 /*
801 * We have to do this here as we may have a dso with no symbol
802 * hit that has a name longer than the ones with symbols
803 * sampled.
804 */
805 if (!sort_dso.elide && !al->map->dso->slen_calculated)
806 dso__calc_col_width(al->map->dso, &session->hists);
807
808 al->sym = map__find_symbol(al->map, al->addr, filter);
809 } else {
810 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
811
812 if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
813 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
814 !symbol_conf.dso_list)
815 hists__set_col_len(&session->hists, HISTC_DSO,
816 unresolved_col_width);
817 }
818
819 if (symbol_conf.sym_list && al->sym &&
820 !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
821 goto out_filtered;
822
823 return 0;
824
825 out_filtered:
826 al->filtered = true;
827 return 0;
828 }
829
830 static int event__parse_id_sample(const event_t *event,
831 struct perf_session *session,
832 struct sample_data *sample)
833 {
834 const u64 *array;
835 u64 type;
836
837 sample->cpu = sample->pid = sample->tid = -1;
838 sample->stream_id = sample->id = sample->time = -1ULL;
839
840 if (!session->sample_id_all)
841 return 0;
842
843 array = event->sample.array;
844 array += ((event->header.size -
845 sizeof(event->header)) / sizeof(u64)) - 1;
846 type = session->sample_type;
847
848 if (type & PERF_SAMPLE_CPU) {
849 u32 *p = (u32 *)array;
850 sample->cpu = *p;
851 array--;
852 }
853
854 if (type & PERF_SAMPLE_STREAM_ID) {
855 sample->stream_id = *array;
856 array--;
857 }
858
859 if (type & PERF_SAMPLE_ID) {
860 sample->id = *array;
861 array--;
862 }
863
864 if (type & PERF_SAMPLE_TIME) {
865 sample->time = *array;
866 array--;
867 }
868
869 if (type & PERF_SAMPLE_TID) {
870 u32 *p = (u32 *)array;
871 sample->pid = p[0];
872 sample->tid = p[1];
873 }
874
875 return 0;
876 }
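
With sample_id_all enabled, non-sample events carry a trailer laid out forwards as TID, TIME, ID, STREAM_ID, CPU (only the fields selected in sample_type), which is why the parser above starts at the last u64 of the event and walks backwards. Here is a standalone sketch of that backwards walk over a hand-built trailer; it assumes a little-endian host and that all five fields are present.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* forward layout: {pid,tid}, time, id, stream_id, {cpu,res} */
	uint64_t trailer[] = {
		(uint64_t)4242 | ((uint64_t)4243 << 32),	/* pid=4242, tid=4243 */
		123456789,					/* time */
		7,						/* id */
		7,						/* stream_id */
		2,						/* cpu in the low 32 bits */
	};
	const uint64_t *array = trailer + sizeof(trailer) / sizeof(trailer[0]) - 1;
	uint32_t cpu, pid, tid;
	uint64_t stream_id, id, tstamp;
	const uint32_t *p;

	cpu = *(const uint32_t *)array;	array--;
	stream_id = *array;		array--;
	id = *array;			array--;
	tstamp = *array;		array--;
	p = (const uint32_t *)array;
	pid = p[0];
	tid = p[1];

	printf("pid=%u tid=%u time=%" PRIu64 " id=%" PRIu64 " stream=%" PRIu64 " cpu=%u\n",
	       pid, tid, tstamp, id, stream_id, cpu);
	return 0;
}
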
877
878 int event__parse_sample(const event_t *event, struct perf_session *session,
879 struct sample_data *data)
880 {
881 const u64 *array;
882 u64 type;
883
884 if (event->header.type != PERF_RECORD_SAMPLE)
885 return event__parse_id_sample(event, session, data);
886
887 array = event->sample.array;
888 type = session->sample_type;
889
890 if (type & PERF_SAMPLE_IP) {
891 data->ip = event->ip.ip;
892 array++;
893 }
894
895 if (type & PERF_SAMPLE_TID) {
896 u32 *p = (u32 *)array;
897 data->pid = p[0];
898 data->tid = p[1];
899 array++;
900 }
901
902 if (type & PERF_SAMPLE_TIME) {
903 data->time = *array;
904 array++;
905 }
906
907 if (type & PERF_SAMPLE_ADDR) {
908 data->addr = *array;
909 array++;
910 }
911
912 data->id = -1ULL;
913 if (type & PERF_SAMPLE_ID) {
914 data->id = *array;
915 array++;
916 }
917
918 if (type & PERF_SAMPLE_STREAM_ID) {
919 data->stream_id = *array;
920 array++;
921 }
922
923 if (type & PERF_SAMPLE_CPU) {
924 u32 *p = (u32 *)array;
925 data->cpu = *p;
926 array++;
927 } else
928 data->cpu = -1;
929
930 if (type & PERF_SAMPLE_PERIOD) {
931 data->period = *array;
932 array++;
933 }
934
935 if (type & PERF_SAMPLE_READ) {
936 pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
937 return -1;
938 }
939
940 if (type & PERF_SAMPLE_CALLCHAIN) {
941 data->callchain = (struct ip_callchain *)array;
942 array += 1 + data->callchain->nr;
943 }
944
945 if (type & PERF_SAMPLE_RAW) {
946 u32 *p = (u32 *)array;
947 data->raw_size = *p;
948 p++;
949 data->raw_data = p;
950 }
951
952 return 0;
953 }
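
event__parse_sample() is the forward counterpart: it walks a u64 cursor over the sample body, consuming one slot per PERF_SAMPLE_* bit set in sample_type, in the fixed order IP, TID, TIME, ADDR, ID, STREAM_ID, CPU, PERIOD, followed by the variable-length CALLCHAIN and RAW blocks. A standalone sketch of that cursor walk for a subset of the flags, over a hand-built body; the flag constants mirror the PERF_SAMPLE_* bit positions and a little-endian host is assumed.

#include <inttypes.h>
#include <stdio.h>

enum { S_IP = 1 << 0, S_TID = 1 << 1, S_TIME = 1 << 2, S_PERIOD = 1 << 8 };

int main(void)
{
	uint64_t type = S_IP | S_TID | S_TIME | S_PERIOD;
	uint64_t body[] = {
		0xffffffff8100a000ull,			/* ip */
		(uint64_t)100 | ((uint64_t)101 << 32),	/* pid=100, tid=101 */
		987654321,				/* time */
		1,					/* period */
	};
	const uint64_t *array = body;
	uint64_t ip = 0, tstamp = 0, period = 0;
	uint32_t pid = 0, tid = 0;

	if (type & S_IP)
		ip = *array++;
	if (type & S_TID) {
		const uint32_t *p = (const uint32_t *)array;

		pid = p[0];
		tid = p[1];
		array++;
	}
	if (type & S_TIME)
		tstamp = *array++;
	if (type & S_PERIOD)
		period = *array++;

	printf("ip=%#" PRIx64 " pid=%u tid=%u time=%" PRIu64 " period=%" PRIu64 "\n",
	       ip, pid, tid, tstamp, period);
	return 0;
}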