tools/perf/util/symbol.c
1 #define _GNU_SOURCE
2 #include <ctype.h>
3 #include <dirent.h>
4 #include <errno.h>
5 #include <libgen.h>
6 #include <stdlib.h>
7 #include <stdio.h>
8 #include <string.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <sys/param.h>
12 #include <fcntl.h>
13 #include <unistd.h>
14 #include "build-id.h"
15 #include "debug.h"
16 #include "symbol.h"
17 #include "strlist.h"
18
19 #include <libelf.h>
20 #include <gelf.h>
21 #include <elf.h>
22 #include <limits.h>
23 #include <sys/utsname.h>
24
25 #ifndef NT_GNU_BUILD_ID
26 #define NT_GNU_BUILD_ID 3
27 #endif
28
29 static bool dso__build_id_equal(const struct dso *self, u8 *build_id);
30 static int elf_read_build_id(Elf *elf, void *bf, size_t size);
31 static void dsos__add(struct list_head *head, struct dso *dso);
32 static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
33 static int dso__load_kernel_sym(struct dso *self, struct map *map,
34 symbol_filter_t filter);
35 static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
36 symbol_filter_t filter);
37 static int vmlinux_path__nr_entries;
38 static char **vmlinux_path;
39
40 struct symbol_conf symbol_conf = {
41 .exclude_other = true,
42 .use_modules = true,
43 .try_vmlinux_path = true,
44 };
45
46 int dso__name_len(const struct dso *self)
47 {
48 if (verbose)
49 return self->long_name_len;
50
51 return self->short_name_len;
52 }
53
54 bool dso__loaded(const struct dso *self, enum map_type type)
55 {
56 return self->loaded & (1 << type);
57 }
58
59 bool dso__sorted_by_name(const struct dso *self, enum map_type type)
60 {
61 return self->sorted_by_name & (1 << type);
62 }
63
64 static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
65 {
66 self->sorted_by_name |= (1 << type);
67 }
68
69 bool symbol_type__is_a(char symbol_type, enum map_type map_type)
70 {
71 switch (map_type) {
72 case MAP__FUNCTION:
73 return symbol_type == 'T' || symbol_type == 'W';
74 case MAP__VARIABLE:
75 return symbol_type == 'D' || symbol_type == 'd';
76 default:
77 return false;
78 }
79 }
80
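/*
 * Illustration (addresses made up): kallsyms entries and mis-annotated asm
 * symbols carry no size, so each symbol's ->end is guessed from the next
 * symbol's start, e.g. _text at 0xc0100000 followed by _stext at 0xc0100040
 * gives _text an end of 0xc010003f; the very last symbol is rounded up to
 * the next page instead.
 */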
81 static void symbols__fixup_end(struct rb_root *self)
82 {
83 struct rb_node *nd, *prevnd = rb_first(self);
84 struct symbol *curr, *prev;
85
86 if (prevnd == NULL)
87 return;
88
89 curr = rb_entry(prevnd, struct symbol, rb_node);
90
91 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
92 prev = curr;
93 curr = rb_entry(nd, struct symbol, rb_node);
94
95 if (prev->end == prev->start)
96 prev->end = curr->start - 1;
97 }
98
99 /* Last entry */
100 if (curr->end == curr->start)
101 curr->end = roundup(curr->start, 4096);
102 }
103
104 static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
105 {
106 struct map *prev, *curr;
107 struct rb_node *nd, *prevnd = rb_first(&self->maps[type]);
108
109 if (prevnd == NULL)
110 return;
111
112 curr = rb_entry(prevnd, struct map, rb_node);
113
114 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
115 prev = curr;
116 curr = rb_entry(nd, struct map, rb_node);
117 prev->end = curr->start - 1;
118 }
119
120 /*
121 * We still don't have the actual symbols, so guess the
122 * last map's final address.
123 */
124 curr->end = ~0UL;
125 }
126
127 static void map_groups__fixup_end(struct map_groups *self)
128 {
129 int i;
130 for (i = 0; i < MAP__NR_TYPES; ++i)
131 __map_groups__fixup_end(self, i);
132 }
133
134 static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
135 const char *name)
136 {
137 size_t namelen = strlen(name) + 1;
138 struct symbol *self = calloc(1, (symbol_conf.priv_size +
139 sizeof(*self) + namelen));
140 if (self == NULL)
141 return NULL;
142
143 if (symbol_conf.priv_size)
144 self = ((void *)self) + symbol_conf.priv_size;
145
146 self->start = start;
147 self->end = len ? start + len - 1 : start;
148 self->binding = binding;
149 self->namelen = namelen - 1;
150
151 pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
152
153 memcpy(self->name, name, namelen);
154
155 return self;
156 }
157
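/*
 * Layout sketch (illustrative): when symbol_conf.priv_size is set, the
 * allocation made in symbol__new() is
 *
 *	[ priv_size bytes ][ struct symbol ][ name bytes ]
 *
 * and symbol__new() hands back a pointer to the struct symbol part, so a
 * tool's private data lives at ((void *)sym) - symbol_conf.priv_size,
 * which is also the address symbol__delete() must pass to free().
 */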
158 void symbol__delete(struct symbol *self)
159 {
160 free(((void *)self) - symbol_conf.priv_size);
161 }
162
163 static size_t symbol__fprintf(struct symbol *self, FILE *fp)
164 {
165 return fprintf(fp, " %llx-%llx %c %s\n",
166 self->start, self->end,
167 self->binding == STB_GLOBAL ? 'g' :
168 self->binding == STB_LOCAL ? 'l' : 'w',
169 self->name);
170 }
171
172 void dso__set_long_name(struct dso *self, char *name)
173 {
174 if (name == NULL)
175 return;
176 self->long_name = name;
177 self->long_name_len = strlen(name);
178 }
179
180 static void dso__set_short_name(struct dso *self, const char *name)
181 {
182 if (name == NULL)
183 return;
184 self->short_name = name;
185 self->short_name_len = strlen(name);
186 }
187
188 static void dso__set_basename(struct dso *self)
189 {
190 dso__set_short_name(self, basename(self->long_name));
191 }
192
193 struct dso *dso__new(const char *name)
194 {
195 struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1);
196
197 if (self != NULL) {
198 int i;
199 strcpy(self->name, name);
200 dso__set_long_name(self, self->name);
201 dso__set_short_name(self, self->name);
202 for (i = 0; i < MAP__NR_TYPES; ++i)
203 self->symbols[i] = self->symbol_names[i] = RB_ROOT;
204 self->slen_calculated = 0;
205 self->origin = DSO__ORIG_NOT_FOUND;
206 self->loaded = 0;
207 self->sorted_by_name = 0;
208 self->has_build_id = 0;
209 self->kernel = DSO_TYPE_USER;
210 INIT_LIST_HEAD(&self->node);
211 }
212
213 return self;
214 }
215
216 static void symbols__delete(struct rb_root *self)
217 {
218 struct symbol *pos;
219 struct rb_node *next = rb_first(self);
220
221 while (next) {
222 pos = rb_entry(next, struct symbol, rb_node);
223 next = rb_next(&pos->rb_node);
224 rb_erase(&pos->rb_node, self);
225 symbol__delete(pos);
226 }
227 }
228
229 void dso__delete(struct dso *self)
230 {
231 int i;
232 for (i = 0; i < MAP__NR_TYPES; ++i)
233 symbols__delete(&self->symbols[i]);
234 if (self->sname_alloc)
235 free((char *)self->short_name);
236 if (self->lname_alloc)
237 free(self->long_name);
238 free(self);
239 }
240
241 void dso__set_build_id(struct dso *self, void *build_id)
242 {
243 memcpy(self->build_id, build_id, sizeof(self->build_id));
244 self->has_build_id = 1;
245 }
246
247 static void symbols__insert(struct rb_root *self, struct symbol *sym)
248 {
249 struct rb_node **p = &self->rb_node;
250 struct rb_node *parent = NULL;
251 const u64 ip = sym->start;
252 struct symbol *s;
253
254 while (*p != NULL) {
255 parent = *p;
256 s = rb_entry(parent, struct symbol, rb_node);
257 if (ip < s->start)
258 p = &(*p)->rb_left;
259 else
260 p = &(*p)->rb_right;
261 }
262 rb_link_node(&sym->rb_node, parent, p);
263 rb_insert_color(&sym->rb_node, self);
264 }
265
266 static struct symbol *symbols__find(struct rb_root *self, u64 ip)
267 {
268 struct rb_node *n;
269
270 if (self == NULL)
271 return NULL;
272
273 n = self->rb_node;
274
275 while (n) {
276 struct symbol *s = rb_entry(n, struct symbol, rb_node);
277
278 if (ip < s->start)
279 n = n->rb_left;
280 else if (ip > s->end)
281 n = n->rb_right;
282 else
283 return s;
284 }
285
286 return NULL;
287 }
288
289 struct symbol_name_rb_node {
290 struct rb_node rb_node;
291 struct symbol sym;
292 };
293
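/*
 * struct symbol_name_rb_node embeds the symbol right after its rb_node, so
 * given a struct symbol * the container is recovered below with
 * ((void *)sym) - sizeof(*parent), i.e. minus sizeof(struct rb_node). This
 * only works because symbol__init() grows symbol_conf.priv_size by that same
 * amount when sorting by name is enabled.
 */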
294 static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
295 {
296 struct rb_node **p = &self->rb_node;
297 struct rb_node *parent = NULL;
298 struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
299
300 while (*p != NULL) {
301 parent = *p;
302 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
303 if (strcmp(sym->name, s->sym.name) < 0)
304 p = &(*p)->rb_left;
305 else
306 p = &(*p)->rb_right;
307 }
308 rb_link_node(&symn->rb_node, parent, p);
309 rb_insert_color(&symn->rb_node, self);
310 }
311
312 static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source)
313 {
314 struct rb_node *nd;
315
316 for (nd = rb_first(source); nd; nd = rb_next(nd)) {
317 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
318 symbols__insert_by_name(self, pos);
319 }
320 }
321
322 static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name)
323 {
324 struct rb_node *n;
325
326 if (self == NULL)
327 return NULL;
328
329 n = self->rb_node;
330
331 while (n) {
332 struct symbol_name_rb_node *s;
333 int cmp;
334
335 s = rb_entry(n, struct symbol_name_rb_node, rb_node);
336 cmp = strcmp(name, s->sym.name);
337
338 if (cmp < 0)
339 n = n->rb_left;
340 else if (cmp > 0)
341 n = n->rb_right;
342 else
343 return &s->sym;
344 }
345
346 return NULL;
347 }
348
349 struct symbol *dso__find_symbol(struct dso *self,
350 enum map_type type, u64 addr)
351 {
352 return symbols__find(&self->symbols[type], addr);
353 }
354
355 struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
356 const char *name)
357 {
358 return symbols__find_by_name(&self->symbol_names[type], name);
359 }
360
361 void dso__sort_by_name(struct dso *self, enum map_type type)
362 {
363 dso__set_sorted_by_name(self, type);
364 return symbols__sort_by_name(&self->symbol_names[type],
365 &self->symbols[type]);
366 }
367
368 int build_id__sprintf(const u8 *self, int len, char *bf)
369 {
370 char *bid = bf;
371 const u8 *raw = self;
372 int i;
373
374 for (i = 0; i < len; ++i) {
375 sprintf(bid, "%02x", *raw);
376 ++raw;
377 bid += 2;
378 }
379
380 return raw - self;
381 }
382
383 size_t dso__fprintf_buildid(struct dso *self, FILE *fp)
384 {
385 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
386
387 build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id);
388 return fprintf(fp, "%s", sbuild_id);
389 }
390
391 size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
392 {
393 struct rb_node *nd;
394 size_t ret = fprintf(fp, "dso: %s (", self->short_name);
395
396 if (self->short_name != self->long_name)
397 ret += fprintf(fp, "%s, ", self->long_name);
398 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
399 self->loaded ? "" : "NOT ");
400 ret += dso__fprintf_buildid(self, fp);
401 ret += fprintf(fp, ")\n");
402 for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) {
403 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
404 ret += symbol__fprintf(pos, fp);
405 }
406
407 return ret;
408 }
409
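/*
 * Expected /proc/kallsyms input, one symbol per line (example lines, values
 * made up):
 *
 *	ffffffff8102ca50 T schedule
 *	ffffffffa0004000 t e1000_clean	[e1000e]
 *
 * hex2u64() consumes the leading address, line[len] holds the one-letter
 * type and the name starts two characters later; a trailing "\t[module]"
 * part stays in the name and is split off later in dso__split_kallsyms().
 */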
410 int kallsyms__parse(const char *filename, void *arg,
411 int (*process_symbol)(void *arg, const char *name,
412 char type, u64 start))
413 {
414 char *line = NULL;
415 size_t n;
416 int err = 0;
417 FILE *file = fopen(filename, "r");
418
419 if (file == NULL)
420 goto out_failure;
421
422 while (!feof(file)) {
423 u64 start;
424 int line_len, len;
425 char symbol_type;
426 char *symbol_name;
427
428 line_len = getline(&line, &n, file);
429 if (line_len < 0 || !line)
430 break;
431
432 line[--line_len] = '\0'; /* \n */
433
434 len = hex2u64(line, &start);
435
436 len++;
437 if (len + 2 >= line_len)
438 continue;
439
440 symbol_type = toupper(line[len]);
441 symbol_name = line + len + 2;
442
443 err = process_symbol(arg, symbol_name, symbol_type, start);
444 if (err)
445 break;
446 }
447
448 free(line);
449 fclose(file);
450 return err;
451
452 out_failure:
453 return -1;
454 }
455
456 struct process_kallsyms_args {
457 struct map *map;
458 struct dso *dso;
459 };
460
461 static u8 kallsyms2elf_type(char type)
462 {
463 if (type == 'W')
464 return STB_WEAK;
465
466 return isupper(type) ? STB_GLOBAL : STB_LOCAL;
467 }
468
469 static int map__process_kallsym_symbol(void *arg, const char *name,
470 char type, u64 start)
471 {
472 struct symbol *sym;
473 struct process_kallsyms_args *a = arg;
474 struct rb_root *root = &a->dso->symbols[a->map->type];
475
476 if (!symbol_type__is_a(type, a->map->type))
477 return 0;
478
479 /*
480 * Will fix up the end later, when we have all symbols sorted.
481 */
482 sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
483
484 if (sym == NULL)
485 return -ENOMEM;
486 /*
487 * We will pass the symbols to the filter later, in
488 * dso__split_kallsyms(), when we have split the maps per module.
489 */
490 symbols__insert(root, sym);
491
492 return 0;
493 }
494
495 /*
496 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
497 * so that, in the next step, we can set each symbol's ->end address and then
498 * call dso__split_kallsyms().
499 */
500 static int dso__load_all_kallsyms(struct dso *self, const char *filename,
501 struct map *map)
502 {
503 struct process_kallsyms_args args = { .map = map, .dso = self, };
504 return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
505 }
506
507 /*
508 * Split the symbols into maps, making sure there are no overlaps, i.e. the
509 * kernel range is broken into several maps, named [kernel].N, as we don't have
510 * the original ELF section names that vmlinux has.
511 */
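/*
 * Rough illustration: with kallsyms listing core kernel symbols, then a
 * module's symbols (tagged "\t[e1000e]"), then more core symbols, the single
 * kernel map ends up as the original map plus the [e1000e] map created from
 * /proc/modules plus a synthesized [kernel].N map for the trailing core
 * range, each holding only the symbols that fall inside its range.
 */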
512 static int dso__split_kallsyms(struct dso *self, struct map *map,
513 symbol_filter_t filter)
514 {
515 struct map_groups *kmaps = map__kmap(map)->kmaps;
516 struct machine *machine = kmaps->machine;
517 struct map *curr_map = map;
518 struct symbol *pos;
519 int count = 0;
520 struct rb_root *root = &self->symbols[map->type];
521 struct rb_node *next = rb_first(root);
522 int kernel_range = 0;
523
524 while (next) {
525 char *module;
526
527 pos = rb_entry(next, struct symbol, rb_node);
528 next = rb_next(&pos->rb_node);
529
530 module = strchr(pos->name, '\t');
531 if (module) {
532 if (!symbol_conf.use_modules)
533 goto discard_symbol;
534
535 *module++ = '\0';
536
537 if (strcmp(curr_map->dso->short_name, module)) {
538 if (curr_map != map &&
539 self->kernel == DSO_TYPE_GUEST_KERNEL &&
540 machine__is_default_guest(machine)) {
541 /*
542 * We assume all symbols of a module are
543 * contiguous in kallsyms, so curr_map
544 * points to a module and all its
545 * symbols are in its kmap. Mark it as
546 * loaded.
547 */
548 dso__set_loaded(curr_map->dso,
549 curr_map->type);
550 }
551
552 curr_map = map_groups__find_by_name(kmaps,
553 map->type, module);
554 if (curr_map == NULL) {
555 pr_debug("%s/proc/{kallsyms,modules} "
556 "inconsistency while looking "
557 "for \"%s\" module!\n",
558 machine->root_dir, module);
559 curr_map = map;
560 goto discard_symbol;
561 }
562
563 if (curr_map->dso->loaded &&
564 !machine__is_default_guest(machine))
565 goto discard_symbol;
566 }
567 /*
568 * So that we look just like we get from .ko files,
569 * i.e. not prelinked, relative to map->start.
570 */
571 pos->start = curr_map->map_ip(curr_map, pos->start);
572 pos->end = curr_map->map_ip(curr_map, pos->end);
573 } else if (curr_map != map) {
574 char dso_name[PATH_MAX];
575 struct dso *dso;
576
577 if (self->kernel == DSO_TYPE_GUEST_KERNEL)
578 snprintf(dso_name, sizeof(dso_name),
579 "[guest.kernel].%d",
580 kernel_range++);
581 else
582 snprintf(dso_name, sizeof(dso_name),
583 "[kernel].%d",
584 kernel_range++);
585
586 dso = dso__new(dso_name);
587 if (dso == NULL)
588 return -1;
589
590 dso->kernel = self->kernel;
591
592 curr_map = map__new2(pos->start, dso, map->type);
593 if (curr_map == NULL) {
594 dso__delete(dso);
595 return -1;
596 }
597
598 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
599 map_groups__insert(kmaps, curr_map);
600 ++kernel_range;
601 }
602
603 if (filter && filter(curr_map, pos)) {
604 discard_symbol: rb_erase(&pos->rb_node, root);
605 symbol__delete(pos);
606 } else {
607 if (curr_map != map) {
608 rb_erase(&pos->rb_node, root);
609 symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
610 }
611 count++;
612 }
613 }
614
615 if (curr_map != map &&
616 self->kernel == DSO_TYPE_GUEST_KERNEL &&
617 machine__is_default_guest(kmaps->machine)) {
618 dso__set_loaded(curr_map->dso, curr_map->type);
619 }
620
621 return count;
622 }
623
624 int dso__load_kallsyms(struct dso *self, const char *filename,
625 struct map *map, symbol_filter_t filter)
626 {
627 if (dso__load_all_kallsyms(self, filename, map) < 0)
628 return -1;
629
630 symbols__fixup_end(&self->symbols[map->type]);
631 if (self->kernel == DSO_TYPE_GUEST_KERNEL)
632 self->origin = DSO__ORIG_GUEST_KERNEL;
633 else
634 self->origin = DSO__ORIG_KERNEL;
635
636 return dso__split_kallsyms(self, map, filter);
637 }
638
639 static int dso__load_perf_map(struct dso *self, struct map *map,
640 symbol_filter_t filter)
641 {
642 char *line = NULL;
643 size_t n;
644 FILE *file;
645 int nr_syms = 0;
646
647 file = fopen(self->long_name, "r");
648 if (file == NULL)
649 goto out_failure;
650
651 while (!feof(file)) {
652 u64 start, size;
653 struct symbol *sym;
654 int line_len, len;
655
656 line_len = getline(&line, &n, file);
657 if (line_len < 0)
658 break;
659
660 if (!line)
661 goto out_failure;
662
663 line[--line_len] = '\0'; /* \n */
664
665 len = hex2u64(line, &start);
666
667 len++;
668 if (len + 2 >= line_len)
669 continue;
670
671 len += hex2u64(line + len, &size);
672
673 len++;
674 if (len + 2 >= line_len)
675 continue;
676
677 sym = symbol__new(start, size, STB_GLOBAL, line + len);
678
679 if (sym == NULL)
680 goto out_delete_line;
681
682 if (filter && filter(map, sym))
683 symbol__delete(sym);
684 else {
685 symbols__insert(&self->symbols[map->type], sym);
686 nr_syms++;
687 }
688 }
689
690 free(line);
691 fclose(file);
692
693 return nr_syms;
694
695 out_delete_line:
696 free(line);
697 out_failure:
698 return -1;
699 }
700
701 /**
702 * elf_symtab__for_each_symbol - iterate through all the symbols
703 *
704 * @syms: Elf_Data with the symbol entries, @nr_syms: number of entries
705 * @idx: uint32_t index iterator
706 * @sym: GElf_Sym iterator
707 */
708 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
709 for (idx = 0, gelf_getsym(syms, idx, &sym);\
710 idx < nr_syms; \
711 idx++, gelf_getsym(syms, idx, &sym))
712
713 static inline uint8_t elf_sym__type(const GElf_Sym *sym)
714 {
715 return GELF_ST_TYPE(sym->st_info);
716 }
717
718 static inline int elf_sym__is_function(const GElf_Sym *sym)
719 {
720 return elf_sym__type(sym) == STT_FUNC &&
721 sym->st_name != 0 &&
722 sym->st_shndx != SHN_UNDEF;
723 }
724
725 static inline bool elf_sym__is_object(const GElf_Sym *sym)
726 {
727 return elf_sym__type(sym) == STT_OBJECT &&
728 sym->st_name != 0 &&
729 sym->st_shndx != SHN_UNDEF;
730 }
731
732 static inline int elf_sym__is_label(const GElf_Sym *sym)
733 {
734 return elf_sym__type(sym) == STT_NOTYPE &&
735 sym->st_name != 0 &&
736 sym->st_shndx != SHN_UNDEF &&
737 sym->st_shndx != SHN_ABS;
738 }
739
740 static inline const char *elf_sec__name(const GElf_Shdr *shdr,
741 const Elf_Data *secstrs)
742 {
743 return secstrs->d_buf + shdr->sh_name;
744 }
745
746 static inline int elf_sec__is_text(const GElf_Shdr *shdr,
747 const Elf_Data *secstrs)
748 {
749 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
750 }
751
752 static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
753 const Elf_Data *secstrs)
754 {
755 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
756 }
757
758 static inline const char *elf_sym__name(const GElf_Sym *sym,
759 const Elf_Data *symstrs)
760 {
761 return symstrs->d_buf + sym->st_name;
762 }
763
764 static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
765 GElf_Shdr *shp, const char *name,
766 size_t *idx)
767 {
768 Elf_Scn *sec = NULL;
769 size_t cnt = 1;
770
771 while ((sec = elf_nextscn(elf, sec)) != NULL) {
772 char *str;
773
774 gelf_getshdr(sec, shp);
775 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
776 if (!strcmp(name, str)) {
777 if (idx)
778 *idx = cnt;
779 break;
780 }
781 ++cnt;
782 }
783
784 return sec;
785 }
786
787 #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
788 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
789 idx < nr_entries; \
790 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
791
792 #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
793 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
794 idx < nr_entries; \
795 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
796
797 /*
798 * We need to check if we have a .dynsym, so that we can handle the
799 * .plt, synthesizing its symbols, which aren't in the symbol tables (be it
800 * .dynsym or .symtab).
801 * Always look at the original dso, not at debuginfo packages, which
802 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
803 */
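/*
 * Sketch of the outcome: for each .rel(a).plt entry whose .dynsym entry is,
 * say, "printf", a synthetic symbol named "printf@plt" is created at the
 * matching .plt slot, shdr_plt.sh_entsize bytes long, so samples landing in
 * the PLT resolve to a readable name ("printf" here is only an example).
 */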
804 static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
805 symbol_filter_t filter)
806 {
807 uint32_t nr_rel_entries, idx;
808 GElf_Sym sym;
809 u64 plt_offset;
810 GElf_Shdr shdr_plt;
811 struct symbol *f;
812 GElf_Shdr shdr_rel_plt, shdr_dynsym;
813 Elf_Data *reldata, *syms, *symstrs;
814 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
815 size_t dynsym_idx;
816 GElf_Ehdr ehdr;
817 char sympltname[1024];
818 Elf *elf;
819 int nr = 0, symidx, fd, err = 0;
820
821 fd = open(self->long_name, O_RDONLY);
822 if (fd < 0)
823 goto out;
824
825 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
826 if (elf == NULL)
827 goto out_close;
828
829 if (gelf_getehdr(elf, &ehdr) == NULL)
830 goto out_elf_end;
831
832 scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
833 ".dynsym", &dynsym_idx);
834 if (scn_dynsym == NULL)
835 goto out_elf_end;
836
837 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
838 ".rela.plt", NULL);
839 if (scn_plt_rel == NULL) {
840 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
841 ".rel.plt", NULL);
842 if (scn_plt_rel == NULL)
843 goto out_elf_end;
844 }
845
846 err = -1;
847
848 if (shdr_rel_plt.sh_link != dynsym_idx)
849 goto out_elf_end;
850
851 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
852 goto out_elf_end;
853
854 /*
855 * Fetch the relocation section to find the indexes to the GOT
856 * and the symbols in the .dynsym they refer to.
857 */
858 reldata = elf_getdata(scn_plt_rel, NULL);
859 if (reldata == NULL)
860 goto out_elf_end;
861
862 syms = elf_getdata(scn_dynsym, NULL);
863 if (syms == NULL)
864 goto out_elf_end;
865
866 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
867 if (scn_symstrs == NULL)
868 goto out_elf_end;
869
870 symstrs = elf_getdata(scn_symstrs, NULL);
871 if (symstrs == NULL)
872 goto out_elf_end;
873
874 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
875 plt_offset = shdr_plt.sh_offset;
876
877 if (shdr_rel_plt.sh_type == SHT_RELA) {
878 GElf_Rela pos_mem, *pos;
879
880 elf_section__for_each_rela(reldata, pos, pos_mem, idx,
881 nr_rel_entries) {
882 symidx = GELF_R_SYM(pos->r_info);
883 plt_offset += shdr_plt.sh_entsize;
884 gelf_getsym(syms, symidx, &sym);
885 snprintf(sympltname, sizeof(sympltname),
886 "%s@plt", elf_sym__name(&sym, symstrs));
887
888 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
889 STB_GLOBAL, sympltname);
890 if (!f)
891 goto out_elf_end;
892
893 if (filter && filter(map, f))
894 symbol__delete(f);
895 else {
896 symbols__insert(&self->symbols[map->type], f);
897 ++nr;
898 }
899 }
900 } else if (shdr_rel_plt.sh_type == SHT_REL) {
901 GElf_Rel pos_mem, *pos;
902 elf_section__for_each_rel(reldata, pos, pos_mem, idx,
903 nr_rel_entries) {
904 symidx = GELF_R_SYM(pos->r_info);
905 plt_offset += shdr_plt.sh_entsize;
906 gelf_getsym(syms, symidx, &sym);
907 snprintf(sympltname, sizeof(sympltname),
908 "%s@plt", elf_sym__name(&sym, symstrs));
909
910 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
911 STB_GLOBAL, sympltname);
912 if (!f)
913 goto out_elf_end;
914
915 if (filter && filter(map, f))
916 symbol__delete(f);
917 else {
918 symbols__insert(&self->symbols[map->type], f);
919 ++nr;
920 }
921 }
922 }
923
924 err = 0;
925 out_elf_end:
926 elf_end(elf);
927 out_close:
928 close(fd);
929
930 if (err == 0)
931 return nr;
932 out:
933 pr_debug("%s: problems reading %s PLT info.\n",
934 __func__, self->long_name);
935 return 0;
936 }
937
938 static bool elf_sym__is_a(GElf_Sym *self, enum map_type type)
939 {
940 switch (type) {
941 case MAP__FUNCTION:
942 return elf_sym__is_function(self);
943 case MAP__VARIABLE:
944 return elf_sym__is_object(self);
945 default:
946 return false;
947 }
948 }
949
950 static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type)
951 {
952 switch (type) {
953 case MAP__FUNCTION:
954 return elf_sec__is_text(self, secstrs);
955 case MAP__VARIABLE:
956 return elf_sec__is_data(self, secstrs);
957 default:
958 return false;
959 }
960 }
961
962 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
963 {
964 Elf_Scn *sec = NULL;
965 GElf_Shdr shdr;
966 size_t cnt = 1;
967
968 while ((sec = elf_nextscn(elf, sec)) != NULL) {
969 gelf_getshdr(sec, &shdr);
970
971 if ((addr >= shdr.sh_addr) &&
972 (addr < (shdr.sh_addr + shdr.sh_size)))
973 return cnt;
974
975 ++cnt;
976 }
977
978 return -1;
979 }
980
981 static int dso__load_sym(struct dso *self, struct map *map, const char *name,
982 int fd, symbol_filter_t filter, int kmodule,
983 int want_symtab)
984 {
985 struct kmap *kmap = self->kernel ? map__kmap(map) : NULL;
986 struct map *curr_map = map;
987 struct dso *curr_dso = self;
988 Elf_Data *symstrs, *secstrs;
989 uint32_t nr_syms;
990 int err = -1;
991 uint32_t idx;
992 GElf_Ehdr ehdr;
993 GElf_Shdr shdr, opdshdr;
994 Elf_Data *syms, *opddata = NULL;
995 GElf_Sym sym;
996 Elf_Scn *sec, *sec_strndx, *opdsec;
997 Elf *elf;
998 int nr = 0;
999 size_t opdidx = 0;
1000
1001 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1002 if (elf == NULL) {
1003 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
1004 goto out_close;
1005 }
1006
1007 if (gelf_getehdr(elf, &ehdr) == NULL) {
1008 pr_debug("%s: cannot get elf header.\n", __func__);
1009 goto out_elf_end;
1010 }
1011
1012 /* Always reject images with a mismatched build-id: */
1013 if (self->has_build_id) {
1014 u8 build_id[BUILD_ID_SIZE];
1015
1016 if (elf_read_build_id(elf, build_id,
1017 BUILD_ID_SIZE) != BUILD_ID_SIZE)
1018 goto out_elf_end;
1019
1020 if (!dso__build_id_equal(self, build_id))
1021 goto out_elf_end;
1022 }
1023
1024 sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
1025 if (sec == NULL) {
1026 if (want_symtab)
1027 goto out_elf_end;
1028
1029 sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);
1030 if (sec == NULL)
1031 goto out_elf_end;
1032 }
1033
1034 opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
1035 if (opdsec)
1036 opddata = elf_rawdata(opdsec, NULL);
1037
1038 syms = elf_getdata(sec, NULL);
1039 if (syms == NULL)
1040 goto out_elf_end;
1041
1042 sec = elf_getscn(elf, shdr.sh_link);
1043 if (sec == NULL)
1044 goto out_elf_end;
1045
1046 symstrs = elf_getdata(sec, NULL);
1047 if (symstrs == NULL)
1048 goto out_elf_end;
1049
1050 sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
1051 if (sec_strndx == NULL)
1052 goto out_elf_end;
1053
1054 secstrs = elf_getdata(sec_strndx, NULL);
1055 if (secstrs == NULL)
1056 goto out_elf_end;
1057
1058 nr_syms = shdr.sh_size / shdr.sh_entsize;
1059
1060 memset(&sym, 0, sizeof(sym));
1061 if (self->kernel == DSO_TYPE_USER) {
1062 self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
1063 elf_section_by_name(elf, &ehdr, &shdr,
1064 ".gnu.prelink_undo",
1065 NULL) != NULL);
1066 } else self->adjust_symbols = 0;
1067
1068 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1069 struct symbol *f;
1070 const char *elf_name = elf_sym__name(&sym, symstrs);
1071 char *demangled = NULL;
1072 int is_label = elf_sym__is_label(&sym);
1073 const char *section_name;
1074
1075 if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
1076 strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
1077 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
1078
1079 if (!is_label && !elf_sym__is_a(&sym, map->type))
1080 continue;
1081
1082 /* Reject ARM ELF "mapping symbols": these aren't unique and
1083 * don't identify functions, so they would confuse the profile
1084 * output: */
1085 if (ehdr.e_machine == EM_ARM) {
1086 if (!strcmp(elf_name, "$a") ||
1087 !strcmp(elf_name, "$d") ||
1088 !strcmp(elf_name, "$t"))
1089 continue;
1090 }
1091
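/*
 * ppc64 ABIv1 background: function symbols point into the .opd section of
 * function descriptors, and the first word of a descriptor is the real
 * entry address, hence the dereference and section-index re-lookup below.
 */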
1092 if (opdsec && sym.st_shndx == opdidx) {
1093 u32 offset = sym.st_value - opdshdr.sh_addr;
1094 u64 *opd = opddata->d_buf + offset;
1095 sym.st_value = *opd;
1096 sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
1097 }
1098
1099 sec = elf_getscn(elf, sym.st_shndx);
1100 if (!sec)
1101 goto out_elf_end;
1102
1103 gelf_getshdr(sec, &shdr);
1104
1105 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
1106 continue;
1107
1108 section_name = elf_sec__name(&shdr, secstrs);
1109
1110 if (self->kernel != DSO_TYPE_USER || kmodule) {
1111 char dso_name[PATH_MAX];
1112
1113 if (strcmp(section_name,
1114 (curr_dso->short_name +
1115 self->short_name_len)) == 0)
1116 goto new_symbol;
1117
1118 if (strcmp(section_name, ".text") == 0) {
1119 curr_map = map;
1120 curr_dso = self;
1121 goto new_symbol;
1122 }
1123
1124 snprintf(dso_name, sizeof(dso_name),
1125 "%s%s", self->short_name, section_name);
1126
1127 curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
1128 if (curr_map == NULL) {
1129 u64 start = sym.st_value;
1130
1131 if (kmodule)
1132 start += map->start + shdr.sh_offset;
1133
1134 curr_dso = dso__new(dso_name);
1135 if (curr_dso == NULL)
1136 goto out_elf_end;
1137 curr_dso->kernel = self->kernel;
1138 curr_map = map__new2(start, curr_dso,
1139 map->type);
1140 if (curr_map == NULL) {
1141 dso__delete(curr_dso);
1142 goto out_elf_end;
1143 }
1144 curr_map->map_ip = identity__map_ip;
1145 curr_map->unmap_ip = identity__map_ip;
1146 curr_dso->origin = self->origin;
1147 map_groups__insert(kmap->kmaps, curr_map);
1148 dsos__add(&self->node, curr_dso);
1149 dso__set_loaded(curr_dso, map->type);
1150 } else
1151 curr_dso = curr_map->dso;
1152
1153 goto new_symbol;
1154 }
1155
1156 if (curr_dso->adjust_symbols) {
1157 pr_debug4("%s: adjusting symbol: st_value: %#Lx "
1158 "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
1159 (u64)sym.st_value, (u64)shdr.sh_addr,
1160 (u64)shdr.sh_offset);
1161 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1162 }
1163 /*
1164 * We need to figure out if the object was created from C++ sources;
1165 * DWARF's DW_TAG_compile_unit has this, but we don't always have access
1166 * to it...
1167 */
1168 demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
1169 if (demangled != NULL)
1170 elf_name = demangled;
1171 new_symbol:
1172 f = symbol__new(sym.st_value, sym.st_size,
1173 GELF_ST_BIND(sym.st_info), elf_name);
1174 free(demangled);
1175 if (!f)
1176 goto out_elf_end;
1177
1178 if (filter && filter(curr_map, f))
1179 symbol__delete(f);
1180 else {
1181 symbols__insert(&curr_dso->symbols[curr_map->type], f);
1182 nr++;
1183 }
1184 }
1185
1186 /*
1187 * For misannotated, zeroed, ASM function sizes.
1188 */
1189 if (nr > 0) {
1190 symbols__fixup_end(&self->symbols[map->type]);
1191 if (kmap) {
1192 /*
1193 * We need to fixup this here too because we create new
1194 * maps here, for things like vsyscall sections.
1195 */
1196 __map_groups__fixup_end(kmap->kmaps, map->type);
1197 }
1198 }
1199 err = nr;
1200 out_elf_end:
1201 elf_end(elf);
1202 out_close:
1203 return err;
1204 }
1205
1206 static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
1207 {
1208 return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
1209 }
1210
1211 bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1212 {
1213 bool have_build_id = false;
1214 struct dso *pos;
1215
1216 list_for_each_entry(pos, head, node) {
1217 if (with_hits && !pos->hit)
1218 continue;
1219 if (pos->has_build_id) {
1220 have_build_id = true;
1221 continue;
1222 }
1223 if (filename__read_build_id(pos->long_name, pos->build_id,
1224 sizeof(pos->build_id)) > 0) {
1225 have_build_id = true;
1226 pos->has_build_id = true;
1227 }
1228 }
1229
1230 return have_build_id;
1231 }
1232
1233 /*
1234 * Align offset to 4 bytes as needed for note name and descriptor data.
1235 */
1236 #define NOTE_ALIGN(n) (((n) + 3) & -4U)
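/*
 * A .note.gnu.build-id section is a stream of records shaped roughly as
 * { Elf_Nhdr; name["GNU\0"]; desc[build-id bytes] }, with name and desc each
 * padded to a 4-byte boundary, which is what NOTE_ALIGN() accounts for while
 * elf_read_build_id() walks the records below.
 */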
1237
1238 static int elf_read_build_id(Elf *elf, void *bf, size_t size)
1239 {
1240 int err = -1;
1241 GElf_Ehdr ehdr;
1242 GElf_Shdr shdr;
1243 Elf_Data *data;
1244 Elf_Scn *sec;
1245 Elf_Kind ek;
1246 void *ptr;
1247
1248 if (size < BUILD_ID_SIZE)
1249 goto out;
1250
1251 ek = elf_kind(elf);
1252 if (ek != ELF_K_ELF)
1253 goto out;
1254
1255 if (gelf_getehdr(elf, &ehdr) == NULL) {
1256 pr_err("%s: cannot get elf header.\n", __func__);
1257 goto out;
1258 }
1259
1260 sec = elf_section_by_name(elf, &ehdr, &shdr,
1261 ".note.gnu.build-id", NULL);
1262 if (sec == NULL) {
1263 sec = elf_section_by_name(elf, &ehdr, &shdr,
1264 ".notes", NULL);
1265 if (sec == NULL)
1266 goto out;
1267 }
1268
1269 data = elf_getdata(sec, NULL);
1270 if (data == NULL)
1271 goto out;
1272
1273 ptr = data->d_buf;
1274 while (ptr < (data->d_buf + data->d_size)) {
1275 GElf_Nhdr *nhdr = ptr;
1276 int namesz = NOTE_ALIGN(nhdr->n_namesz),
1277 descsz = NOTE_ALIGN(nhdr->n_descsz);
1278 const char *name;
1279
1280 ptr += sizeof(*nhdr);
1281 name = ptr;
1282 ptr += namesz;
1283 if (nhdr->n_type == NT_GNU_BUILD_ID &&
1284 nhdr->n_namesz == sizeof("GNU")) {
1285 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
1286 memcpy(bf, ptr, BUILD_ID_SIZE);
1287 err = BUILD_ID_SIZE;
1288 break;
1289 }
1290 }
1291 ptr += descsz;
1292 }
1293
1294 out:
1295 return err;
1296 }
1297
1298 int filename__read_build_id(const char *filename, void *bf, size_t size)
1299 {
1300 int fd, err = -1;
1301 Elf *elf;
1302
1303 if (size < BUILD_ID_SIZE)
1304 goto out;
1305
1306 fd = open(filename, O_RDONLY);
1307 if (fd < 0)
1308 goto out;
1309
1310 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1311 if (elf == NULL) {
1312 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
1313 goto out_close;
1314 }
1315
1316 err = elf_read_build_id(elf, bf, size);
1317
1318 elf_end(elf);
1319 out_close:
1320 close(fd);
1321 out:
1322 return err;
1323 }
1324
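/*
 * Same note-walking idea as above, but reading the raw note stream from a
 * sysfs file such as /sys/kernel/notes or a module's
 * notes/.note.gnu.build-id entry, record by record, until the "GNU"-owned
 * NT_GNU_BUILD_ID note is found.
 */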
1325 int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
1326 {
1327 int fd, err = -1;
1328
1329 if (size < BUILD_ID_SIZE)
1330 goto out;
1331
1332 fd = open(filename, O_RDONLY);
1333 if (fd < 0)
1334 goto out;
1335
1336 while (1) {
1337 char bf[BUFSIZ];
1338 GElf_Nhdr nhdr;
1339 int namesz, descsz;
1340
1341 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
1342 break;
1343
1344 namesz = NOTE_ALIGN(nhdr.n_namesz);
1345 descsz = NOTE_ALIGN(nhdr.n_descsz);
1346 if (nhdr.n_type == NT_GNU_BUILD_ID &&
1347 nhdr.n_namesz == sizeof("GNU")) {
1348 if (read(fd, bf, namesz) != namesz)
1349 break;
1350 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
1351 if (read(fd, build_id,
1352 BUILD_ID_SIZE) == BUILD_ID_SIZE) {
1353 err = 0;
1354 break;
1355 }
1356 } else if (read(fd, bf, descsz) != descsz)
1357 break;
1358 } else {
1359 int n = namesz + descsz;
1360 if (read(fd, bf, n) != n)
1361 break;
1362 }
1363 }
1364 close(fd);
1365 out:
1366 return err;
1367 }
1368
1369 char dso__symtab_origin(const struct dso *self)
1370 {
1371 static const char origin[] = {
1372 [DSO__ORIG_KERNEL] = 'k',
1373 [DSO__ORIG_JAVA_JIT] = 'j',
1374 [DSO__ORIG_BUILD_ID_CACHE] = 'B',
1375 [DSO__ORIG_FEDORA] = 'f',
1376 [DSO__ORIG_UBUNTU] = 'u',
1377 [DSO__ORIG_BUILDID] = 'b',
1378 [DSO__ORIG_DSO] = 'd',
1379 [DSO__ORIG_KMODULE] = 'K',
1380 [DSO__ORIG_GUEST_KERNEL] = 'g',
1381 [DSO__ORIG_GUEST_KMODULE] = 'G',
1382 };
1383
1384 if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
1385 return '!';
1386 return origin[self->origin];
1387 }
1388
1389 int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
1390 {
1391 int size = PATH_MAX;
1392 char *name;
1393 int ret = -1;
1394 int fd;
1395 struct machine *machine;
1396 const char *root_dir;
1397 int want_symtab;
1398
1399 dso__set_loaded(self, map->type);
1400
1401 if (self->kernel == DSO_TYPE_KERNEL)
1402 return dso__load_kernel_sym(self, map, filter);
1403 else if (self->kernel == DSO_TYPE_GUEST_KERNEL)
1404 return dso__load_guest_kernel_sym(self, map, filter);
1405
1406 if (map->groups && map->groups->machine)
1407 machine = map->groups->machine;
1408 else
1409 machine = NULL;
1410
1411 name = malloc(size);
1412 if (!name)
1413 return -1;
1414
1415 self->adjust_symbols = 0;
1416
1417 if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
1418 ret = dso__load_perf_map(self, map, filter);
1419 self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
1420 DSO__ORIG_NOT_FOUND;
1421 return ret;
1422 }
1423
1424 /* Iterate over candidate debug images.
1425 * On the first pass, only load images if they have a full symtab.
1426 * Failing that, do a second pass where we accept .dynsym also
1427 */
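/*
 * For a DSO like /lib/libc-2.12.so the candidates tried below are, in order
 * (paths illustrative): the build-id cache entry under ~/.debug,
 * /usr/lib/debug/lib/libc-2.12.so.debug, /usr/lib/debug/lib/libc-2.12.so,
 * /usr/lib/debug/.build-id/xx/yyyy....debug, and finally the DSO itself.
 */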
1428 for (self->origin = DSO__ORIG_BUILD_ID_CACHE, want_symtab = 1;
1429 self->origin != DSO__ORIG_NOT_FOUND;
1430 self->origin++) {
1431 switch (self->origin) {
1432 case DSO__ORIG_BUILD_ID_CACHE:
1433 if (dso__build_id_filename(self, name, size) == NULL)
1434 continue;
1435 break;
1436 case DSO__ORIG_FEDORA:
1437 snprintf(name, size, "/usr/lib/debug%s.debug",
1438 self->long_name);
1439 break;
1440 case DSO__ORIG_UBUNTU:
1441 snprintf(name, size, "/usr/lib/debug%s",
1442 self->long_name);
1443 break;
1444 case DSO__ORIG_BUILDID: {
1445 char build_id_hex[BUILD_ID_SIZE * 2 + 1];
1446
1447 if (!self->has_build_id)
1448 continue;
1449
1450 build_id__sprintf(self->build_id,
1451 sizeof(self->build_id),
1452 build_id_hex);
1453 snprintf(name, size,
1454 "/usr/lib/debug/.build-id/%.2s/%s.debug",
1455 build_id_hex, build_id_hex + 2);
1456 }
1457 break;
1458 case DSO__ORIG_DSO:
1459 snprintf(name, size, "%s", self->long_name);
1460 break;
1461 case DSO__ORIG_GUEST_KMODULE:
1462 if (map->groups && map->groups->machine)
1463 root_dir = map->groups->machine->root_dir;
1464 else
1465 root_dir = "";
1466 snprintf(name, size, "%s%s", root_dir, self->long_name);
1467 break;
1468
1469 default:
1470 /*
1471 * If we wanted a full symtab but no image had one,
1472 * relax our requirements and repeat the search.
1473 */
1474 if (want_symtab) {
1475 want_symtab = 0;
1476 self->origin = DSO__ORIG_BUILD_ID_CACHE;
1477 } else
1478 continue;
1479 }
1480
1481 /* Name is now the name of the next image to try */
1482 fd = open(name, O_RDONLY);
1483 if (fd < 0)
1484 continue;
1485
1486 ret = dso__load_sym(self, map, name, fd, filter, 0,
1487 want_symtab);
1488 close(fd);
1489
1490 /*
1491 * Some people seem to have debuginfo files _WITHOUT_ debug
1492 * info!?!?
1493 */
1494 if (!ret)
1495 continue;
1496
1497 if (ret > 0) {
1498 int nr_plt = dso__synthesize_plt_symbols(self, map, filter);
1499 if (nr_plt > 0)
1500 ret += nr_plt;
1501 break;
1502 }
1503 }
1504
1505 free(name);
1506 if (ret < 0 && strstr(self->name, " (deleted)") != NULL)
1507 return 0;
1508 return ret;
1509 }
1510
1511 struct map *map_groups__find_by_name(struct map_groups *self,
1512 enum map_type type, const char *name)
1513 {
1514 struct rb_node *nd;
1515
1516 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
1517 struct map *map = rb_entry(nd, struct map, rb_node);
1518
1519 if (map->dso && strcmp(map->dso->short_name, name) == 0)
1520 return map;
1521 }
1522
1523 return NULL;
1524 }
1525
1526 static int dso__kernel_module_get_build_id(struct dso *self,
1527 const char *root_dir)
1528 {
1529 char filename[PATH_MAX];
1530 /*
1531 * kernel module short names are of the form "[module]" and
1532 * we need just "module" here.
1533 */
1534 const char *name = self->short_name + 1;
1535
1536 snprintf(filename, sizeof(filename),
1537 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1538 root_dir, (int)strlen(name) - 1, name);
1539
1540 if (sysfs__read_build_id(filename, self->build_id,
1541 sizeof(self->build_id)) == 0)
1542 self->has_build_id = true;
1543
1544 return 0;
1545 }
1546
1547 static int map_groups__set_modules_path_dir(struct map_groups *self,
1548 const char *dir_name)
1549 {
1550 struct dirent *dent;
1551 DIR *dir = opendir(dir_name);
1552 int ret = 0;
1553
1554 if (!dir) {
1555 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1556 return -1;
1557 }
1558
1559 while ((dent = readdir(dir)) != NULL) {
1560 char path[PATH_MAX];
1561 struct stat st;
1562
1563 /* sshfs might return bad dent->d_type, so we have to stat */
1564 sprintf(path, "%s/%s", dir_name, dent->d_name);
1565 if (stat(path, &st))
1566 continue;
1567
1568 if (S_ISDIR(st.st_mode)) {
1569 if (!strcmp(dent->d_name, ".") ||
1570 !strcmp(dent->d_name, ".."))
1571 continue;
1572
1573 snprintf(path, sizeof(path), "%s/%s",
1574 dir_name, dent->d_name);
1575 ret = map_groups__set_modules_path_dir(self, path);
1576 if (ret < 0)
1577 goto out;
1578 } else {
1579 char *dot = strrchr(dent->d_name, '.'),
1580 dso_name[PATH_MAX];
1581 struct map *map;
1582 char *long_name;
1583
1584 if (dot == NULL || strcmp(dot, ".ko"))
1585 continue;
1586 snprintf(dso_name, sizeof(dso_name), "[%.*s]",
1587 (int)(dot - dent->d_name), dent->d_name);
1588
1589 strxfrchar(dso_name, '-', '_');
1590 map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name);
1591 if (map == NULL)
1592 continue;
1593
1594 snprintf(path, sizeof(path), "%s/%s",
1595 dir_name, dent->d_name);
1596
1597 long_name = strdup(path);
1598 if (long_name == NULL) {
1599 ret = -1;
1600 goto out;
1601 }
1602 dso__set_long_name(map->dso, long_name);
1603 map->dso->lname_alloc = 1;
1604 dso__kernel_module_get_build_id(map->dso, "");
1605 }
1606 }
1607
1608 out:
1609 closedir(dir);
1610 return ret;
1611 }
1612
1613 static char *get_kernel_version(const char *root_dir)
1614 {
1615 char version[PATH_MAX];
1616 FILE *file;
1617 char *name, *tmp;
1618 const char *prefix = "Linux version ";
1619
1620 sprintf(version, "%s/proc/version", root_dir);
1621 file = fopen(version, "r");
1622 if (!file)
1623 return NULL;
1624
1625 version[0] = '\0';
1626 tmp = fgets(version, sizeof(version), file);
1627 fclose(file);
1628
1629 name = strstr(version, prefix);
1630 if (!name)
1631 return NULL;
1632 name += strlen(prefix);
1633 tmp = strchr(name, ' ');
1634 if (tmp)
1635 *tmp = '\0';
1636
1637 return strdup(name);
1638 }
1639
1640 static int machine__set_modules_path(struct machine *self)
1641 {
1642 char *version;
1643 char modules_path[PATH_MAX];
1644
1645 version = get_kernel_version(self->root_dir);
1646 if (!version)
1647 return -1;
1648
1649 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
1650 self->root_dir, version);
1651 free(version);
1652
1653 return map_groups__set_modules_path_dir(&self->kmaps, modules_path);
1654 }
1655
1656 /*
1657 * Constructor variant for modules (where we know from /proc/modules where
1658 * they are loaded) and for vmlinux, where we will only know where it
1659 * starts and ends after loading all the symbols.
1660 */
1661 static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
1662 {
1663 struct map *self = calloc(1, (sizeof(*self) +
1664 (dso->kernel ? sizeof(struct kmap) : 0)));
1665 if (self != NULL) {
1666 /*
1667 * ->end will be filled after we load all the symbols
1668 */
1669 map__init(self, type, start, 0, 0, dso);
1670 }
1671
1672 return self;
1673 }
1674
1675 struct map *machine__new_module(struct machine *self, u64 start,
1676 const char *filename)
1677 {
1678 struct map *map;
1679 struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename);
1680
1681 if (dso == NULL)
1682 return NULL;
1683
1684 map = map__new2(start, dso, MAP__FUNCTION);
1685 if (map == NULL)
1686 return NULL;
1687
1688 if (machine__is_host(self))
1689 dso->origin = DSO__ORIG_KMODULE;
1690 else
1691 dso->origin = DSO__ORIG_GUEST_KMODULE;
1692 map_groups__insert(&self->kmaps, map);
1693 return map;
1694 }
1695
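/*
 * Parses /proc/modules lines such as (illustrative):
 *
 *	e1000e 225344 0 - Live 0xffffffffa0004000
 *
 * strrchr(line, 'x') locates the load address right after the "0x", the
 * first space terminates the module name, and the resulting map is
 * registered under the short name "[e1000e]".
 */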
1696 static int machine__create_modules(struct machine *self)
1697 {
1698 char *line = NULL;
1699 size_t n;
1700 FILE *file;
1701 struct map *map;
1702 const char *modules;
1703 char path[PATH_MAX];
1704
1705 if (machine__is_default_guest(self))
1706 modules = symbol_conf.default_guest_modules;
1707 else {
1708 sprintf(path, "%s/proc/modules", self->root_dir);
1709 modules = path;
1710 }
1711
1712 file = fopen(modules, "r");
1713 if (file == NULL)
1714 return -1;
1715
1716 while (!feof(file)) {
1717 char name[PATH_MAX];
1718 u64 start;
1719 char *sep;
1720 int line_len;
1721
1722 line_len = getline(&line, &n, file);
1723 if (line_len < 0)
1724 break;
1725
1726 if (!line)
1727 goto out_failure;
1728
1729 line[--line_len] = '\0'; /* \n */
1730
1731 sep = strrchr(line, 'x');
1732 if (sep == NULL)
1733 continue;
1734
1735 hex2u64(sep + 1, &start);
1736
1737 sep = strchr(line, ' ');
1738 if (sep == NULL)
1739 continue;
1740
1741 *sep = '\0';
1742
1743 snprintf(name, sizeof(name), "[%s]", line);
1744 map = machine__new_module(self, start, name);
1745 if (map == NULL)
1746 goto out_delete_line;
1747 dso__kernel_module_get_build_id(map->dso, self->root_dir);
1748 }
1749
1750 free(line);
1751 fclose(file);
1752
1753 return machine__set_modules_path(self);
1754
1755 out_delete_line:
1756 free(line);
1757 out_failure:
1758 return -1;
1759 }
1760
1761 static int dso__load_vmlinux(struct dso *self, struct map *map,
1762 const char *vmlinux, symbol_filter_t filter)
1763 {
1764 int err = -1, fd;
1765
1766 fd = open(vmlinux, O_RDONLY);
1767 if (fd < 0)
1768 return -1;
1769
1770 dso__set_loaded(self, map->type);
1771 err = dso__load_sym(self, map, vmlinux, fd, filter, 0, 0);
1772 close(fd);
1773
1774 if (err > 0)
1775 pr_debug("Using %s for symbols\n", vmlinux);
1776
1777 return err;
1778 }
1779
1780 int dso__load_vmlinux_path(struct dso *self, struct map *map,
1781 symbol_filter_t filter)
1782 {
1783 int i, err = 0;
1784 char *filename;
1785
1786 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1787 vmlinux_path__nr_entries + 1);
1788
1789 filename = dso__build_id_filename(self, NULL, 0);
1790 if (filename != NULL) {
1791 err = dso__load_vmlinux(self, map, filename, filter);
1792 if (err > 0) {
1793 dso__set_long_name(self, filename);
1794 goto out;
1795 }
1796 free(filename);
1797 }
1798
1799 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1800 err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
1801 if (err > 0) {
1802 dso__set_long_name(self, strdup(vmlinux_path[i]));
1803 break;
1804 }
1805 }
1806 out:
1807 return err;
1808 }
1809
1810 static int dso__load_kernel_sym(struct dso *self, struct map *map,
1811 symbol_filter_t filter)
1812 {
1813 int err;
1814 const char *kallsyms_filename = NULL;
1815 char *kallsyms_allocated_filename = NULL;
1816 /*
1817 * Step 1: if the user specified a vmlinux filename, use it and only
1818 * it, reporting errors to the user if it cannot be used.
1819 *
1820 * For instance, try to analyse an ARM perf.data file _without_ a
1821 * build-id, or if the user specifies the wrong path to the right
1822 * vmlinux file, obviously we can't fallback to another vmlinux (a
1823 * x86_64 one, on the machine where analysis is being performed, say),
1824 * or worse, /proc/kallsyms.
1825 *
1826 * If the specified file _has_ a build-id and there is a build-id
1827 * section in the perf.data file, we will still do the expected
1828 * validation in dso__load_vmlinux and will bail out if they don't
1829 * match.
1830 */
1831 if (symbol_conf.vmlinux_name != NULL) {
1832 err = dso__load_vmlinux(self, map,
1833 symbol_conf.vmlinux_name, filter);
1834 if (err > 0) {
1835 dso__set_long_name(self,
1836 strdup(symbol_conf.vmlinux_name));
1837 goto out_fixup;
1838 }
1839 return err;
1840 }
1841
1842 if (vmlinux_path != NULL) {
1843 err = dso__load_vmlinux_path(self, map, filter);
1844 if (err > 0)
1845 goto out_fixup;
1846 }
1847
1848 /*
1849 * Say the kernel DSO was created when processing the build-id header table;
1850 * then we have a build-id, so check whether it matches the running kernel's,
1851 * and use it if it does.
1852 */
1853 if (self->has_build_id) {
1854 u8 kallsyms_build_id[BUILD_ID_SIZE];
1855 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1856
1857 if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
1858 sizeof(kallsyms_build_id)) == 0) {
1859 if (dso__build_id_equal(self, kallsyms_build_id)) {
1860 kallsyms_filename = "/proc/kallsyms";
1861 goto do_kallsyms;
1862 }
1863 }
1864 /*
1865 * Now check whether we have it in the build-id cache, in
1866 * $HOME/.debug/[kernel.kallsyms].
1867 */
1868 build_id__sprintf(self->build_id, sizeof(self->build_id),
1869 sbuild_id);
1870
1871 if (asprintf(&kallsyms_allocated_filename,
1872 "%s/.debug/[kernel.kallsyms]/%s",
1873 getenv("HOME"), sbuild_id) == -1) {
1874 pr_err("Not enough memory for kallsyms file lookup\n");
1875 return -1;
1876 }
1877
1878 kallsyms_filename = kallsyms_allocated_filename;
1879
1880 if (access(kallsyms_filename, F_OK)) {
1881 pr_err("No kallsyms or vmlinux with build-id %s "
1882 "was found\n", sbuild_id);
1883 free(kallsyms_allocated_filename);
1884 return -1;
1885 }
1886 } else {
1887 /*
1888 * Last resort, if we don't have a build-id and couldn't find
1889 * any vmlinux file, try the running kernel kallsyms table.
1890 */
1891 kallsyms_filename = "/proc/kallsyms";
1892 }
1893
1894 do_kallsyms:
1895 err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
1896 if (err > 0)
1897 pr_debug("Using %s for symbols\n", kallsyms_filename);
1898 free(kallsyms_allocated_filename);
1899
1900 if (err > 0) {
1901 out_fixup:
1902 if (kallsyms_filename != NULL)
1903 dso__set_long_name(self, strdup("[kernel.kallsyms]"));
1904 map__fixup_start(map);
1905 map__fixup_end(map);
1906 }
1907
1908 return err;
1909 }
1910
1911 static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
1912 symbol_filter_t filter)
1913 {
1914 int err;
1915 const char *kallsyms_filename = NULL;
1916 struct machine *machine;
1917 char path[PATH_MAX];
1918
1919 if (!map->groups) {
1920 pr_debug("Guest kernel map hasn't the point to groups\n");
1921 return -1;
1922 }
1923 machine = map->groups->machine;
1924
1925 if (machine__is_default_guest(machine)) {
1926 /*
1927 * If the user specified a vmlinux filename, use it and only
1928 * it, reporting errors to the user if it cannot be used.
1929 * Otherwise use the guest kallsyms file given on the command line.
1930 */
1931 if (symbol_conf.default_guest_vmlinux_name != NULL) {
1932 err = dso__load_vmlinux(self, map,
1933 symbol_conf.default_guest_vmlinux_name, filter);
1934 goto out_try_fixup;
1935 }
1936
1937 kallsyms_filename = symbol_conf.default_guest_kallsyms;
1938 if (!kallsyms_filename)
1939 return -1;
1940 } else {
1941 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1942 kallsyms_filename = path;
1943 }
1944
1945 err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
1946 if (err > 0)
1947 pr_debug("Using %s for symbols\n", kallsyms_filename);
1948
1949 out_try_fixup:
1950 if (err > 0) {
1951 if (kallsyms_filename != NULL) {
1952 machine__mmap_name(machine, path, sizeof(path));
1953 dso__set_long_name(self, strdup(path));
1954 }
1955 map__fixup_start(map);
1956 map__fixup_end(map);
1957 }
1958
1959 return err;
1960 }
1961
1962 static void dsos__add(struct list_head *head, struct dso *dso)
1963 {
1964 list_add_tail(&dso->node, head);
1965 }
1966
1967 static struct dso *dsos__find(struct list_head *head, const char *name)
1968 {
1969 struct dso *pos;
1970
1971 list_for_each_entry(pos, head, node)
1972 if (strcmp(pos->long_name, name) == 0)
1973 return pos;
1974 return NULL;
1975 }
1976
1977 struct dso *__dsos__findnew(struct list_head *head, const char *name)
1978 {
1979 struct dso *dso = dsos__find(head, name);
1980
1981 if (!dso) {
1982 dso = dso__new(name);
1983 if (dso != NULL) {
1984 dsos__add(head, dso);
1985 dso__set_basename(dso);
1986 }
1987 }
1988
1989 return dso;
1990 }
1991
1992 size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1993 {
1994 struct dso *pos;
1995 size_t ret = 0;
1996
1997 list_for_each_entry(pos, head, node) {
1998 int i;
1999 for (i = 0; i < MAP__NR_TYPES; ++i)
2000 ret += dso__fprintf(pos, i, fp);
2001 }
2002
2003 return ret;
2004 }
2005
2006 size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp)
2007 {
2008 struct rb_node *nd;
2009 size_t ret = 0;
2010
2011 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
2012 struct machine *pos = rb_entry(nd, struct machine, rb_node);
2013 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
2014 ret += __dsos__fprintf(&pos->user_dsos, fp);
2015 }
2016
2017 return ret;
2018 }
2019
2020 static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
2021 bool with_hits)
2022 {
2023 struct dso *pos;
2024 size_t ret = 0;
2025
2026 list_for_each_entry(pos, head, node) {
2027 if (with_hits && !pos->hit)
2028 continue;
2029 ret += dso__fprintf_buildid(pos, fp);
2030 ret += fprintf(fp, " %s\n", pos->long_name);
2031 }
2032 return ret;
2033 }
2034
2035 size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits)
2036 {
2037 return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) +
2038 __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits);
2039 }
2040
2041 size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits)
2042 {
2043 struct rb_node *nd;
2044 size_t ret = 0;
2045
2046 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
2047 struct machine *pos = rb_entry(nd, struct machine, rb_node);
2048 ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
2049 }
2050 return ret;
2051 }
2052
2053 struct dso *dso__new_kernel(const char *name)
2054 {
2055 struct dso *self = dso__new(name ?: "[kernel.kallsyms]");
2056
2057 if (self != NULL) {
2058 dso__set_short_name(self, "[kernel]");
2059 self->kernel = DSO_TYPE_KERNEL;
2060 }
2061
2062 return self;
2063 }
2064
2065 static struct dso *dso__new_guest_kernel(struct machine *machine,
2066 const char *name)
2067 {
2068 char bf[PATH_MAX];
2069 struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf)));
2070
2071 if (self != NULL) {
2072 dso__set_short_name(self, "[guest.kernel]");
2073 self->kernel = DSO_TYPE_GUEST_KERNEL;
2074 }
2075
2076 return self;
2077 }
2078
2079 void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine)
2080 {
2081 char path[PATH_MAX];
2082
2083 if (machine__is_default_guest(machine))
2084 return;
2085 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
2086 if (sysfs__read_build_id(path, self->build_id,
2087 sizeof(self->build_id)) == 0)
2088 self->has_build_id = true;
2089 }
2090
2091 static struct dso *machine__create_kernel(struct machine *self)
2092 {
2093 const char *vmlinux_name = NULL;
2094 struct dso *kernel;
2095
2096 if (machine__is_host(self)) {
2097 vmlinux_name = symbol_conf.vmlinux_name;
2098 kernel = dso__new_kernel(vmlinux_name);
2099 } else {
2100 if (machine__is_default_guest(self))
2101 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
2102 kernel = dso__new_guest_kernel(self, vmlinux_name);
2103 }
2104
2105 if (kernel != NULL) {
2106 dso__read_running_kernel_build_id(kernel, self);
2107 dsos__add(&self->kernel_dsos, kernel);
2108 }
2109 return kernel;
2110 }
2111
2112 int __machine__create_kernel_maps(struct machine *self, struct dso *kernel)
2113 {
2114 enum map_type type;
2115
2116 for (type = 0; type < MAP__NR_TYPES; ++type) {
2117 struct kmap *kmap;
2118
2119 self->vmlinux_maps[type] = map__new2(0, kernel, type);
2120 if (self->vmlinux_maps[type] == NULL)
2121 return -1;
2122
2123 self->vmlinux_maps[type]->map_ip =
2124 self->vmlinux_maps[type]->unmap_ip = identity__map_ip;
2125
2126 kmap = map__kmap(self->vmlinux_maps[type]);
2127 kmap->kmaps = &self->kmaps;
2128 map_groups__insert(&self->kmaps, self->vmlinux_maps[type]);
2129 }
2130
2131 return 0;
2132 }
2133
2134 void machine__destroy_kernel_maps(struct machine *self)
2135 {
2136 enum map_type type;
2137
2138 for (type = 0; type < MAP__NR_TYPES; ++type) {
2139 struct kmap *kmap;
2140
2141 if (self->vmlinux_maps[type] == NULL)
2142 continue;
2143
2144 kmap = map__kmap(self->vmlinux_maps[type]);
2145 map_groups__remove(&self->kmaps, self->vmlinux_maps[type]);
2146 if (kmap->ref_reloc_sym) {
2147 /*
2148 * ref_reloc_sym is shared among all maps, so free just
2149 * on one of them.
2150 */
2151 if (type == MAP__FUNCTION) {
2152 free((char *)kmap->ref_reloc_sym->name);
2153 kmap->ref_reloc_sym->name = NULL;
2154 free(kmap->ref_reloc_sym);
2155 }
2156 kmap->ref_reloc_sym = NULL;
2157 }
2158
2159 map__delete(self->vmlinux_maps[type]);
2160 self->vmlinux_maps[type] = NULL;
2161 }
2162 }
2163
2164 int machine__create_kernel_maps(struct machine *self)
2165 {
2166 struct dso *kernel = machine__create_kernel(self);
2167
2168 if (kernel == NULL ||
2169 __machine__create_kernel_maps(self, kernel) < 0)
2170 return -1;
2171
2172 if (symbol_conf.use_modules && machine__create_modules(self) < 0)
2173 pr_debug("Problems creating module maps, continuing anyway...\n");
2174 /*
2175 * Now that we have all the maps created, just set the ->end of them:
2176 */
2177 map_groups__fixup_end(&self->kmaps);
2178 return 0;
2179 }
2180
2181 static void vmlinux_path__exit(void)
2182 {
2183 while (--vmlinux_path__nr_entries >= 0) {
2184 free(vmlinux_path[vmlinux_path__nr_entries]);
2185 vmlinux_path[vmlinux_path__nr_entries] = NULL;
2186 }
2187
2188 free(vmlinux_path);
2189 vmlinux_path = NULL;
2190 }
2191
2192 static int vmlinux_path__init(void)
2193 {
2194 struct utsname uts;
2195 char bf[PATH_MAX];
2196
2197 if (uname(&uts) < 0)
2198 return -1;
2199
2200 vmlinux_path = malloc(sizeof(char *) * 5);
2201 if (vmlinux_path == NULL)
2202 return -1;
2203
2204 vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
2205 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2206 goto out_fail;
2207 ++vmlinux_path__nr_entries;
2208 vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
2209 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2210 goto out_fail;
2211 ++vmlinux_path__nr_entries;
2212 snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
2213 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
2214 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2215 goto out_fail;
2216 ++vmlinux_path__nr_entries;
2217 snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
2218 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
2219 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2220 goto out_fail;
2221 ++vmlinux_path__nr_entries;
2222 snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
2223 uts.release);
2224 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
2225 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2226 goto out_fail;
2227 ++vmlinux_path__nr_entries;
2228
2229 return 0;
2230
2231 out_fail:
2232 vmlinux_path__exit();
2233 return -1;
2234 }
2235
2236 size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp)
2237 {
2238 int i;
2239 size_t printed = 0;
2240 struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso;
2241
2242 if (kdso->has_build_id) {
2243 char filename[PATH_MAX];
2244 if (dso__build_id_filename(kdso, filename, sizeof(filename)))
2245 printed += fprintf(fp, "[0] %s\n", filename);
2246 }
2247
2248 for (i = 0; i < vmlinux_path__nr_entries; ++i)
2249 printed += fprintf(fp, "[%d] %s\n",
2250 i + kdso->has_build_id, vmlinux_path[i]);
2251
2252 return printed;
2253 }
2254
2255 static int setup_list(struct strlist **list, const char *list_str,
2256 const char *list_name)
2257 {
2258 if (list_str == NULL)
2259 return 0;
2260
2261 *list = strlist__new(true, list_str);
2262 if (!*list) {
2263 pr_err("problems parsing %s list\n", list_name);
2264 return -1;
2265 }
2266 return 0;
2267 }
2268
2269 int symbol__init(void)
2270 {
2271 elf_version(EV_CURRENT);
2272 if (symbol_conf.sort_by_name)
2273 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2274 sizeof(struct symbol));
2275
2276 if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
2277 return -1;
2278
2279 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2280 pr_err("'.' is the only non valid --field-separator argument\n");
2281 return -1;
2282 }
2283
2284 if (setup_list(&symbol_conf.dso_list,
2285 symbol_conf.dso_list_str, "dso") < 0)
2286 return -1;
2287
2288 if (setup_list(&symbol_conf.comm_list,
2289 symbol_conf.comm_list_str, "comm") < 0)
2290 goto out_free_dso_list;
2291
2292 if (setup_list(&symbol_conf.sym_list,
2293 symbol_conf.sym_list_str, "symbol") < 0)
2294 goto out_free_comm_list;
2295
2296 return 0;
2297
2298 out_free_dso_list:
2299 strlist__delete(symbol_conf.dso_list);
2300 out_free_comm_list:
2301 strlist__delete(symbol_conf.comm_list);
2302 return -1;
2303 }
2304
2305 void symbol__exit(void)
2306 {
2307 strlist__delete(symbol_conf.sym_list);
2308 strlist__delete(symbol_conf.dso_list);
2309 strlist__delete(symbol_conf.comm_list);
2310 vmlinux_path__exit();
2311 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2312 }
2313
2314 int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
2315 {
2316 struct machine *machine = machines__findnew(self, pid);
2317
2318 if (machine == NULL)
2319 return -1;
2320
2321 return machine__create_kernel_maps(machine);
2322 }
2323
2324 static int hex(char ch)
2325 {
2326 if ((ch >= '0') && (ch <= '9'))
2327 return ch - '0';
2328 if ((ch >= 'a') && (ch <= 'f'))
2329 return ch - 'a' + 10;
2330 if ((ch >= 'A') && (ch <= 'F'))
2331 return ch - 'A' + 10;
2332 return -1;
2333 }
2334
2335 /*
2336 * While we find nice hex chars, build a long_val.
2337 * Return number of chars processed.
2338 */
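/*
 * Example (made-up input): hex2u64("c0100000 T _text", &v) stops at the
 * space, returns 8 and leaves v == 0xc0100000.
 */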
2339 int hex2u64(const char *ptr, u64 *long_val)
2340 {
2341 const char *p = ptr;
2342 *long_val = 0;
2343
2344 while (*p) {
2345 const int hex_val = hex(*p);
2346
2347 if (hex_val < 0)
2348 break;
2349
2350 *long_val = (*long_val << 4) | hex_val;
2351 p++;
2352 }
2353
2354 return p - ptr;
2355 }
2356
2357 char *strxfrchar(char *s, char from, char to)
2358 {
2359 char *p = s;
2360
2361 while ((p = strchr(p, from)) != NULL)
2362 *p++ = to;
2363
2364 return s;
2365 }
2366
2367 int machines__create_guest_kernel_maps(struct rb_root *self)
2368 {
2369 int ret = 0;
2370 struct dirent **namelist = NULL;
2371 int i, items = 0;
2372 char path[PATH_MAX];
2373 pid_t pid;
2374
2375 if (symbol_conf.default_guest_vmlinux_name ||
2376 symbol_conf.default_guest_modules ||
2377 symbol_conf.default_guest_kallsyms) {
2378 machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID);
2379 }
2380
2381 if (symbol_conf.guestmount) {
2382 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
2383 if (items <= 0)
2384 return -ENOENT;
2385 for (i = 0; i < items; i++) {
2386 if (!isdigit(namelist[i]->d_name[0])) {
2387 /* Filter out . and .. */
2388 continue;
2389 }
2390 pid = atoi(namelist[i]->d_name);
2391 sprintf(path, "%s/%s/proc/kallsyms",
2392 symbol_conf.guestmount,
2393 namelist[i]->d_name);
2394 ret = access(path, R_OK);
2395 if (ret) {
2396 pr_debug("Can't access file %s\n", path);
2397 goto failure;
2398 }
2399 machines__create_kernel_maps(self, pid);
2400 }
2401 failure:
2402 free(namelist);
2403 }
2404
2405 return ret;
2406 }
2407
2408 void machines__destroy_guest_kernel_maps(struct rb_root *self)
2409 {
2410 struct rb_node *next = rb_first(self);
2411
2412 while (next) {
2413 struct machine *pos = rb_entry(next, struct machine, rb_node);
2414
2415 next = rb_next(&pos->rb_node);
2416 rb_erase(&pos->rb_node, self);
2417 machine__delete(pos);
2418 }
2419 }
2420
2421 int machine__load_kallsyms(struct machine *self, const char *filename,
2422 enum map_type type, symbol_filter_t filter)
2423 {
2424 struct map *map = self->vmlinux_maps[type];
2425 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
2426
2427 if (ret > 0) {
2428 dso__set_loaded(map->dso, type);
2429 /*
2430 * Since /proc/kallsyms will have multiple sections for the
2431 * kernel, with modules between them, fixup the end of all
2432 * sections.
2433 */
2434 __map_groups__fixup_end(&self->kmaps, type);
2435 }
2436
2437 return ret;
2438 }
2439
2440 int machine__load_vmlinux_path(struct machine *self, enum map_type type,
2441 symbol_filter_t filter)
2442 {
2443 struct map *map = self->vmlinux_maps[type];
2444 int ret = dso__load_vmlinux_path(map->dso, map, filter);
2445
2446 if (ret > 0) {
2447 dso__set_loaded(map->dso, type);
2448 map__reloc_vmlinux(map);
2449 }
2450
2451 return ret;
2452 }