perf tools: Kill event_t typedef, use 'union perf_event' instead
tools/perf/util/event.c
#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"

static const char *perf_event__names[] = {
        [0] = "TOTAL",
        [PERF_RECORD_MMAP] = "MMAP",
        [PERF_RECORD_LOST] = "LOST",
        [PERF_RECORD_COMM] = "COMM",
        [PERF_RECORD_EXIT] = "EXIT",
        [PERF_RECORD_THROTTLE] = "THROTTLE",
        [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
        [PERF_RECORD_FORK] = "FORK",
        [PERF_RECORD_READ] = "READ",
        [PERF_RECORD_SAMPLE] = "SAMPLE",
        [PERF_RECORD_HEADER_ATTR] = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
        [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
        if (id >= ARRAY_SIZE(perf_event__names))
                return "INVALID";
        if (!perf_event__names[id])
                return "UNKNOWN";
        return perf_event__names[id];
}

static struct perf_sample synth_sample = {
        .pid       = -1,
        .tid       = -1,
        .time      = -1,
        .stream_id = -1,
        .cpu       = -1,
        .period    = 1,
};

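/*
 * Synthesize a PERF_RECORD_COMM event for @pid by parsing /proc/<pid>/status.
 * When @full is set, one event is emitted for every thread listed under
 * /proc/<pid>/task. Returns the thread group id, or 0 if the task exited
 * before its status file could be read.
 */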
static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
                                         int full, perf_event__handler_t process,
                                         struct perf_session *session)
{
        char filename[PATH_MAX];
        char bf[BUFSIZ];
        FILE *fp;
        size_t size = 0;
        DIR *tasks;
        struct dirent dirent, *next;
        pid_t tgid = 0;

        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
out_race:
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);
                return 0;
        }

        memset(&event->comm, 0, sizeof(event->comm));

        while (!event->comm.comm[0] || !event->comm.pid) {
                if (fgets(bf, sizeof(bf), fp) == NULL) {
                        pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
                        goto out;
                }

                if (memcmp(bf, "Name:", 5) == 0) {
                        char *name = bf + 5;
                        while (*name && isspace(*name))
                                ++name;
                        size = strlen(name) - 1;
                        memcpy(event->comm.comm, name, size++);
                } else if (memcmp(bf, "Tgid:", 5) == 0) {
                        char *tgids = bf + 5;
                        while (*tgids && isspace(*tgids))
                                ++tgids;
                        tgid = event->comm.pid = atoi(tgids);
                }
        }

        event->comm.header.type = PERF_RECORD_COMM;
        size = ALIGN(size, sizeof(u64));
        memset(event->comm.comm + size, 0, session->id_hdr_size);
        event->comm.header.size = (sizeof(event->comm) -
                                   (sizeof(event->comm.comm) - size) +
                                   session->id_hdr_size);
        if (!full) {
                event->comm.tid = pid;

                process(event, &synth_sample, session);
                goto out;
        }

        snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

        tasks = opendir(filename);
        if (tasks == NULL)
                goto out_race;

        while (!readdir_r(tasks, &dirent, &next) && next) {
                char *end;
                pid = strtol(dirent.d_name, &end, 10);
                if (*end)
                        continue;

                event->comm.tid = pid;

                process(event, &synth_sample, session);
        }

        closedir(tasks);
out:
        fclose(fp);

        return tgid;
}

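/*
 * Synthesize one PERF_RECORD_MMAP event per executable mapping found in
 * /proc/<pid>/maps. Non-executable mappings and anonymous regions other
 * than the vdso are skipped.
 */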
static int perf_event__synthesize_mmap_events(union perf_event *event,
                                              pid_t pid, pid_t tgid,
                                              perf_event__handler_t process,
                                              struct perf_session *session)
{
        char filename[PATH_MAX];
        FILE *fp;

        snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP;
        /*
         * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
         */
        event->header.misc = PERF_RECORD_MISC_USER;

        while (1) {
                char bf[BUFSIZ], *pbf = bf;
                int n;
                size_t size;
                if (fgets(bf, sizeof(bf), fp) == NULL)
                        break;

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
                n = hex2u64(pbf, &event->mmap.start);
                if (n < 0)
                        continue;
                pbf += n + 1;
                n = hex2u64(pbf, &event->mmap.len);
                if (n < 0)
                        continue;
                pbf += n + 3;
                if (*pbf == 'x') { /* vm_exec */
                        char *execname = strchr(bf, '/');

                        /* Catch VDSO */
                        if (execname == NULL)
                                execname = strstr(bf, "[vdso]");

                        if (execname == NULL)
                                continue;

                        pbf += 3;
                        n = hex2u64(pbf, &event->mmap.pgoff);

                        size = strlen(execname);
                        execname[size - 1] = '\0'; /* Remove \n */
                        memcpy(event->mmap.filename, execname, size);
                        size = ALIGN(size, sizeof(u64));
                        event->mmap.len -= event->mmap.start;
                        event->mmap.header.size = (sizeof(event->mmap) -
                                (sizeof(event->mmap.filename) - size));
                        memset(event->mmap.filename + size, 0, session->id_hdr_size);
                        event->mmap.header.size += session->id_hdr_size;
                        event->mmap.pid = tgid;
                        event->mmap.tid = pid;

                        process(event, &synth_sample, session);
                }
        }

        fclose(fp);
        return 0;
}

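/*
 * Synthesize PERF_RECORD_MMAP events for every kernel module currently
 * loaded on @machine, so that samples hitting module code can be resolved.
 */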
int perf_event__synthesize_modules(perf_event__handler_t process,
                                   struct perf_session *session,
                                   struct machine *machine)
{
        struct rb_node *nd;
        struct map_groups *kmaps = &machine->kmaps;
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          session->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP;

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
             nd; nd = rb_next(nd)) {
                size_t size;
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (pos->dso->kernel)
                        continue;

                size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                event->mmap.header.type = PERF_RECORD_MMAP;
                event->mmap.header.size = (sizeof(event->mmap) -
                                (sizeof(event->mmap.filename) - size));
                memset(event->mmap.filename + size, 0, session->id_hdr_size);
                event->mmap.header.size += session->id_hdr_size;
                event->mmap.start = pos->start;
                event->mmap.len = pos->end - pos->start;
                event->mmap.pid = machine->pid;

                memcpy(event->mmap.filename, pos->dso->long_name,
                       pos->dso->long_name_len + 1);
                process(event, &synth_sample, session);
        }

        free(event);
        return 0;
}

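/*
 * Synthesize the COMM and MMAP events for a single thread. The event
 * buffers are preallocated by the callers so they can be reused across
 * threads.
 */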
static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      pid_t pid, perf_event__handler_t process,
                                      struct perf_session *session)
{
        pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
                                                 session);
        if (tgid == -1)
                return -1;
        return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
                                                  process, session);
}

int perf_event__synthesize_thread(pid_t pid, perf_event__handler_t process,
                                  struct perf_session *session)
{
        union perf_event *comm_event, *mmap_event;
        int err = -1;

        comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        err = __event__synthesize_thread(comm_event, mmap_event, pid,
                                         process, session);
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

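/*
 * Walk /proc and synthesize COMM and MMAP events for every task currently
 * running, e.g. at the start of a system wide record session.
 */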
int perf_event__synthesize_threads(perf_event__handler_t process,
                                   struct perf_session *session)
{
        DIR *proc;
        struct dirent dirent, *next;
        union perf_event *comm_event, *mmap_event;
        int err = -1;

        comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        proc = opendir("/proc");
        if (proc == NULL)
                goto out_free_mmap;

        while (!readdir_r(proc, &dirent, &next) && next) {
                char *end;
                pid_t pid = strtol(dirent.d_name, &end, 10);

                if (*end) /* only interested in proper numerical dirents */
                        continue;

                __event__synthesize_thread(comm_event, mmap_event, pid,
                                           process, session);
        }

        closedir(proc);
        err = 0;
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

struct process_symbol_args {
        const char *name;
        u64        start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
                          u64 start, u64 end __used)
{
        struct process_symbol_args *args = arg;

        /*
         * Must be a function or at least an alias, as in PARISC64, where "_text" is
         * an 'A' to the same address as "_stext".
         */
        if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
              type == 'A') || strcmp(name, args->name))
                return 0;

        args->start = start;
        return 1;
}

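/*
 * Synthesize a PERF_RECORD_MMAP event covering the kernel image itself,
 * using @symbol_name (usually "_text") looked up in kallsyms as the
 * reference relocation symbol.
 */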
int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
                                       struct perf_session *session,
                                       struct machine *machine,
                                       const char *symbol_name)
{
        size_t size;
        const char *filename, *mmap_name;
        char path[PATH_MAX];
        char name_buff[PATH_MAX];
        struct map *map;
        int err;
        /*
         * We should get this from /sys/kernel/sections/.text, but till that is
         * available use this, and after it is use this as a fallback for older
         * kernels.
         */
        struct process_symbol_args args = { .name = symbol_name, };
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          session->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                event->header.misc = PERF_RECORD_MISC_KERNEL;
                filename = "/proc/kallsyms";
        } else {
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
                if (machine__is_default_guest(machine))
                        filename = (char *) symbol_conf.default_guest_kallsyms;
                else {
                        sprintf(path, "%s/proc/kallsyms", machine->root_dir);
                        filename = path;
                }
        }

        if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
                return -ENOENT;

        map = machine->vmlinux_maps[MAP__FUNCTION];
        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                        "%s%s", mmap_name, symbol_name) + 1;
        size = ALIGN(size, sizeof(u64));
        event->mmap.header.type = PERF_RECORD_MMAP;
        event->mmap.header.size = (sizeof(event->mmap) -
                        (sizeof(event->mmap.filename) - size) + session->id_hdr_size);
        event->mmap.pgoff = args.start;
        event->mmap.start = map->start;
        event->mmap.len = map->end - event->mmap.start;
        event->mmap.pid = machine->pid;

        err = process(event, &synth_sample, session);
        free(event);

        return err;
}

static void thread__comm_adjust(struct thread *self, struct hists *hists)
{
        char *comm = self->comm;

        if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            (!symbol_conf.comm_list ||
             strlist__has_entry(symbol_conf.comm_list, comm))) {
                u16 slen = strlen(comm);

                if (hists__new_col_len(hists, HISTC_COMM, slen))
                        hists__set_col_len(hists, HISTC_THREAD, slen + 6);
        }
}

static int thread__set_comm_adjust(struct thread *self, const char *comm,
                                   struct hists *hists)
{
        int ret = thread__set_comm(self, comm);

        if (ret)
                return ret;

        thread__comm_adjust(self, hists);

        return 0;
}

int perf_event__process_comm(union perf_event *event,
                             struct perf_sample *sample __used,
                             struct perf_session *session)
{
        struct thread *thread = perf_session__findnew(session, event->comm.tid);

        dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);

        if (thread == NULL || thread__set_comm_adjust(thread, event->comm.comm,
                                                      &session->hists)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
                return -1;
        }

        return 0;
}

int perf_event__process_lost(union perf_event *event,
                             struct perf_sample *sample __used,
                             struct perf_session *session)
{
        dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
                    event->lost.id, event->lost.lost);
        session->hists.stats.total_lost += event->lost.lost;
        return 0;
}

static void perf_event__set_kernel_mmap_len(union perf_event *event,
                                            struct map **maps)
{
        maps[MAP__FUNCTION]->start = event->mmap.start;
        maps[MAP__FUNCTION]->end = event->mmap.start + event->mmap.len;
        /*
         * Be a bit paranoid here, some perf.data file came with
         * a zero sized synthesized MMAP event for the kernel.
         */
        if (maps[MAP__FUNCTION]->end == 0)
                maps[MAP__FUNCTION]->end = ~0ULL;
}

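/*
 * Handle an MMAP event that describes the kernel proper or a kernel module,
 * creating the kernel maps on the right machine and setting up the
 * reference relocation symbol.
 */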
static int perf_event__process_kernel_mmap(union perf_event *event,
                                           struct perf_session *session)
{
        struct map *map;
        char kmmap_prefix[PATH_MAX];
        struct machine *machine;
        enum dso_kernel_type kernel_type;
        bool is_kernel_mmap;

        machine = perf_session__findnew_machine(session, event->mmap.pid);
        if (!machine) {
                pr_err("Can't find id %d's machine\n", event->mmap.pid);
                goto out_problem;
        }

        machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
        if (machine__is_host(machine))
                kernel_type = DSO_TYPE_KERNEL;
        else
                kernel_type = DSO_TYPE_GUEST_KERNEL;

        is_kernel_mmap = memcmp(event->mmap.filename,
                                kmmap_prefix,
                                strlen(kmmap_prefix)) == 0;
        if (event->mmap.filename[0] == '/' ||
            (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

                char short_module_name[1024];
                char *name, *dot;

                if (event->mmap.filename[0] == '/') {
                        name = strrchr(event->mmap.filename, '/');
                        if (name == NULL)
                                goto out_problem;

                        ++name; /* skip / */
                        dot = strrchr(name, '.');
                        if (dot == NULL)
                                goto out_problem;
                        snprintf(short_module_name, sizeof(short_module_name),
                                 "[%.*s]", (int)(dot - name), name);
                        strxfrchar(short_module_name, '-', '_');
                } else
                        strcpy(short_module_name, event->mmap.filename);

                map = machine__new_module(machine, event->mmap.start,
                                          event->mmap.filename);
                if (map == NULL)
                        goto out_problem;

                name = strdup(short_module_name);
                if (name == NULL)
                        goto out_problem;

                map->dso->short_name = name;
                map->dso->sname_alloc = 1;
                map->end = map->start + event->mmap.len;
        } else if (is_kernel_mmap) {
                const char *symbol_name = (event->mmap.filename +
                                           strlen(kmmap_prefix));
                /*
                 * Should be there already, from the build-id table in
                 * the header.
                 */
                struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
                                                     kmmap_prefix);
                if (kernel == NULL)
                        goto out_problem;

                kernel->kernel = kernel_type;
                if (__machine__create_kernel_maps(machine, kernel) < 0)
                        goto out_problem;

                perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
                perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
                                                         symbol_name,
                                                         event->mmap.pgoff);
                if (machine__is_default_guest(machine)) {
                        /*
                         * preload dso of guest kernel and modules
                         */
                        dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
                                  NULL);
                }
        }
        return 0;
out_problem:
        return -1;
}

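/*
 * Handle a PERF_RECORD_MMAP event: kernel and guest kernel maps are routed
 * to perf_event__process_kernel_mmap(), user space maps are inserted into
 * the owning thread's map groups.
 */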
int perf_event__process_mmap(union perf_event *event,
                             struct perf_sample *sample __used,
                             struct perf_session *session)
{
        struct machine *machine;
        struct thread *thread;
        struct map *map;
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        int ret = 0;

        dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
                    event->mmap.pid, event->mmap.tid, event->mmap.start,
                    event->mmap.len, event->mmap.pgoff, event->mmap.filename);

        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
            cpumode == PERF_RECORD_MISC_KERNEL) {
                ret = perf_event__process_kernel_mmap(event, session);
                if (ret < 0)
                        goto out_problem;
                return 0;
        }

        machine = perf_session__find_host_machine(session);
        if (machine == NULL)
                goto out_problem;
        thread = perf_session__findnew(session, event->mmap.pid);
        if (thread == NULL)
                goto out_problem;
        map = map__new(&machine->user_dsos, event->mmap.start,
                        event->mmap.len, event->mmap.pgoff,
                        event->mmap.pid, event->mmap.filename,
                        MAP__FUNCTION);
        if (map == NULL)
                goto out_problem;

        thread__insert_map(thread, map);
        return 0;

out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
        return 0;
}

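/*
 * Handle PERF_RECORD_FORK and PERF_RECORD_EXIT: create or remove the thread
 * and link it to its parent.
 */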
int perf_event__process_task(union perf_event *event,
                             struct perf_sample *sample __used,
                             struct perf_session *session)
{
        struct thread *thread = perf_session__findnew(session, event->fork.tid);
        struct thread *parent = perf_session__findnew(session, event->fork.ptid);

        dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
                    event->fork.ppid, event->fork.ptid);

        if (event->header.type == PERF_RECORD_EXIT) {
                perf_session__remove_thread(session, thread);
                return 0;
        }

        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
                return -1;
        }

        return 0;
}

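/*
 * Default dispatcher for the non-sample event types, for tools that don't
 * need specialized handlers.
 */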
int perf_event__process(union perf_event *event, struct perf_sample *sample,
                        struct perf_session *session)
{
        switch (event->header.type) {
        case PERF_RECORD_COMM:
                perf_event__process_comm(event, sample, session);
                break;
        case PERF_RECORD_MMAP:
                perf_event__process_mmap(event, sample, session);
                break;
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
                perf_event__process_task(event, sample, session);
                break;
        case PERF_RECORD_LOST:
                perf_event__process_lost(event, sample, session);
        default:
                break;
        }

        return 0;
}

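/*
 * Resolve @addr to the map it falls into, picking the machine and map
 * groups to search based on @cpumode (host/guest, kernel/user).
 */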
void thread__find_addr_map(struct thread *self,
                           struct perf_session *session, u8 cpumode,
                           enum map_type type, pid_t pid, u64 addr,
                           struct addr_location *al)
{
        struct map_groups *mg = &self->mg;
        struct machine *machine = NULL;

        al->thread = self;
        al->addr = addr;
        al->cpumode = cpumode;
        al->filtered = false;

        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
                al->level = 'k';
                machine = perf_session__find_host_machine(session);
                if (machine == NULL) {
                        al->map = NULL;
                        return;
                }
                mg = &machine->kmaps;
        } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
                al->level = '.';
                machine = perf_session__find_host_machine(session);
        } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                al->level = 'g';
                machine = perf_session__find_machine(session, pid);
                if (machine == NULL) {
                        al->map = NULL;
                        return;
                }
                mg = &machine->kmaps;
        } else {
                /*
                 * 'u' means guest os user space.
                 * TODO: We don't support guest user space. Might support late.
                 */
                if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
                        al->level = 'u';
                else
                        al->level = 'H';
                al->map = NULL;

                if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
                     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
                    !perf_guest)
                        al->filtered = true;
                if ((cpumode == PERF_RECORD_MISC_USER ||
                     cpumode == PERF_RECORD_MISC_KERNEL) &&
                    !perf_host)
                        al->filtered = true;

                return;
        }
try_again:
        al->map = map_groups__find(mg, type, al->addr);
        if (al->map == NULL) {
                /*
                 * If this is outside of all known maps, and is a negative
                 * address, try to look it up in the kernel dso, as it might be
                 * a vsyscall or vdso (which executes in user-mode).
                 *
                 * XXX This is nasty, we should have a symbol list in the
                 * "[vdso]" dso, but for now lets use the old trick of looking
                 * in the whole kernel symbol list.
                 */
                if ((long long)al->addr < 0 &&
                    cpumode == PERF_RECORD_MISC_KERNEL &&
                    machine && mg != &machine->kmaps) {
                        mg = &machine->kmaps;
                        goto try_again;
                }
        } else
                al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
                                struct perf_session *session, u8 cpumode,
                                enum map_type type, pid_t pid, u64 addr,
                                struct addr_location *al,
                                symbol_filter_t filter)
{
        thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
        if (al->map != NULL)
                al->sym = map__find_symbol(al->map, al->addr, filter);
        else
                al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self, struct hists *hists)
{
        if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            (!symbol_conf.dso_list ||
             strlist__has_entry(symbol_conf.dso_list, self->name))) {
                u16 slen = dso__name_len(self);
                hists__new_col_len(hists, HISTC_DSO, slen);
        }

        self->slen_calculated = 1;
}

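/*
 * Resolve a sample's ip to thread, map and symbol, applying the comm, dso
 * and symbol filter lists, and updating histogram column widths on the way.
 */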
int perf_event__preprocess_sample(const union perf_event *event,
                                  struct perf_session *session,
                                  struct addr_location *al,
                                  struct perf_sample *sample,
                                  symbol_filter_t filter)
{
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread = perf_session__findnew(session, event->ip.pid);

        if (thread == NULL)
                return -1;

        if (symbol_conf.comm_list &&
            !strlist__has_entry(symbol_conf.comm_list, thread->comm))
                goto out_filtered;

        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
        /*
         * Have we already created the kernel maps for the host machine?
         *
         * This should have happened earlier, when we processed the kernel MMAP
         * events, but for older perf.data files there was no such thing, so do
         * it now.
         */
        if (cpumode == PERF_RECORD_MISC_KERNEL &&
            session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
                machine__create_kernel_maps(&session->host_machine);

        thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
                              event->ip.pid, event->ip.ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");
        al->sym = NULL;
        al->cpu = sample->cpu;

        if (al->map) {
                if (symbol_conf.dso_list &&
                    (!al->map || !al->map->dso ||
                     !(strlist__has_entry(symbol_conf.dso_list,
                                          al->map->dso->short_name) ||
                       (al->map->dso->short_name != al->map->dso->long_name &&
                        strlist__has_entry(symbol_conf.dso_list,
                                           al->map->dso->long_name)))))
                        goto out_filtered;
                /*
                 * We have to do this here as we may have a dso with no symbol
                 * hit that has a name longer than the ones with symbols
                 * sampled.
                 */
                if (!sort_dso.elide && !al->map->dso->slen_calculated)
                        dso__calc_col_width(al->map->dso, &session->hists);

                al->sym = map__find_symbol(al->map, al->addr, filter);
        } else {
                const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

                if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
                    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
                    !symbol_conf.dso_list)
                        hists__set_col_len(&session->hists, HISTC_DSO,
                                           unresolved_col_width);
        }

        if (symbol_conf.sym_list && al->sym &&
            !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
                goto out_filtered;

        return 0;

out_filtered:
        al->filtered = true;
        return 0;
}