tools/perf/util/symbol-elf.c
1 #include <fcntl.h>
2 #include <stdio.h>
3 #include <errno.h>
4 #include <string.h>
5 #include <unistd.h>
6 #include <inttypes.h>
7
8 #include "symbol.h"
9 #include "demangle-java.h"
10 #include "machine.h"
11 #include "vdso.h"
12 #include <symbol/kallsyms.h>
13 #include "debug.h"
14
15 #ifndef EM_AARCH64
16 #define EM_AARCH64 183 /* ARM 64 bit */
17 #endif
18
19
20 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
21 extern char *cplus_demangle(const char *, int);
22
23 static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
24 {
25 return cplus_demangle(c, i);
26 }
27 #else
28 #ifdef NO_DEMANGLE
29 static inline char *bfd_demangle(void __maybe_unused *v,
30 const char __maybe_unused *c,
31 int __maybe_unused i)
32 {
33 return NULL;
34 }
35 #else
36 #define PACKAGE 'perf'
37 #include <bfd.h>
38 #endif
39 #endif
40
41 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
42 static int elf_getphdrnum(Elf *elf, size_t *dst)
43 {
44 GElf_Ehdr gehdr;
45 GElf_Ehdr *ehdr;
46
47 ehdr = gelf_getehdr(elf, &gehdr);
48 if (!ehdr)
49 return -1;
50
51 *dst = ehdr->e_phnum;
52
53 return 0;
54 }
55 #endif
56
57 #ifndef NT_GNU_BUILD_ID
58 #define NT_GNU_BUILD_ID 3
59 #endif
60
61 /**
62  * elf_symtab__for_each_symbol - iterate through all the symbols
63  * @syms: Elf_Data with the symbol table to iterate over
64  * @nr_syms: number of symbols in @syms
65  * @idx: uint32_t index iterator
66  * @sym: GElf_Sym iterator
67  */
68 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
69 for (idx = 0, gelf_getsym(syms, idx, &sym);\
70 idx < nr_syms; \
71 idx++, gelf_getsym(syms, idx, &sym))
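/*
 * A minimal usage sketch (the local names here are illustrative, not taken
 * from this file):
 *
 *	Elf_Data *syms = elf_getdata(symtab_scn, NULL);
 *	uint32_t nr_syms = shdr.sh_size / shdr.sh_entsize, idx;
 *	GElf_Sym sym;
 *	int nr_functions = 0;
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 *		if (elf_sym__is_function(&sym))
 *			nr_functions++;
 *	}
 */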
72
73 static inline uint8_t elf_sym__type(const GElf_Sym *sym)
74 {
75 return GELF_ST_TYPE(sym->st_info);
76 }
77
78 #ifndef STT_GNU_IFUNC
79 #define STT_GNU_IFUNC 10
80 #endif
81
82 static inline int elf_sym__is_function(const GElf_Sym *sym)
83 {
84 return (elf_sym__type(sym) == STT_FUNC ||
85 elf_sym__type(sym) == STT_GNU_IFUNC) &&
86 sym->st_name != 0 &&
87 sym->st_shndx != SHN_UNDEF;
88 }
89
90 static inline bool elf_sym__is_object(const GElf_Sym *sym)
91 {
92 return elf_sym__type(sym) == STT_OBJECT &&
93 sym->st_name != 0 &&
94 sym->st_shndx != SHN_UNDEF;
95 }
96
97 static inline int elf_sym__is_label(const GElf_Sym *sym)
98 {
99 return elf_sym__type(sym) == STT_NOTYPE &&
100 sym->st_name != 0 &&
101 sym->st_shndx != SHN_UNDEF &&
102 sym->st_shndx != SHN_ABS;
103 }
104
105 static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
106 {
107 switch (type) {
108 case MAP__FUNCTION:
109 return elf_sym__is_function(sym);
110 case MAP__VARIABLE:
111 return elf_sym__is_object(sym);
112 default:
113 return false;
114 }
115 }
116
117 static inline const char *elf_sym__name(const GElf_Sym *sym,
118 const Elf_Data *symstrs)
119 {
120 return symstrs->d_buf + sym->st_name;
121 }
122
123 static inline const char *elf_sec__name(const GElf_Shdr *shdr,
124 const Elf_Data *secstrs)
125 {
126 return secstrs->d_buf + shdr->sh_name;
127 }
128
129 static inline int elf_sec__is_text(const GElf_Shdr *shdr,
130 const Elf_Data *secstrs)
131 {
132 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
133 }
134
135 static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
136 const Elf_Data *secstrs)
137 {
138 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
139 }
140
141 static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
142 enum map_type type)
143 {
144 switch (type) {
145 case MAP__FUNCTION:
146 return elf_sec__is_text(shdr, secstrs);
147 case MAP__VARIABLE:
148 return elf_sec__is_data(shdr, secstrs);
149 default:
150 return false;
151 }
152 }
153
154 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
155 {
156 Elf_Scn *sec = NULL;
157 GElf_Shdr shdr;
158 size_t cnt = 1;
159
160 while ((sec = elf_nextscn(elf, sec)) != NULL) {
161 gelf_getshdr(sec, &shdr);
162
163 if ((addr >= shdr.sh_addr) &&
164 (addr < (shdr.sh_addr + shdr.sh_size)))
165 return cnt;
166
167 ++cnt;
168 }
169
170 return -1;
171 }
172
173 Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
174 GElf_Shdr *shp, const char *name, size_t *idx)
175 {
176 Elf_Scn *sec = NULL;
177 size_t cnt = 1;
178
179 	/* If the ELF is corrupted or truncated, avoid calling elf_strptr(). */
180 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
181 return NULL;
182
183 while ((sec = elf_nextscn(elf, sec)) != NULL) {
184 char *str;
185
186 gelf_getshdr(sec, shp);
187 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
188 if (str && !strcmp(name, str)) {
189 if (idx)
190 *idx = cnt;
191 return sec;
192 }
193 ++cnt;
194 }
195
196 return NULL;
197 }
198
199 #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
200 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
201 idx < nr_entries; \
202 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
203
204 #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
205 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
206 idx < nr_entries; \
207 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
208
209 /*
210 * We need to check if we have a .dynsym, so that we can handle the
211  * .plt, synthesizing its symbols, which aren't in the symtabs (be it
212  * .dynsym or .symtab).
213  * Always look at the original dso, not at debuginfo packages, which
214 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
215 */
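/*
 * The synthesized symbols take their name from the .dynsym entry the
 * relocation points at, with an "@plt" suffix (e.g. "printf@plt"), and
 * each one is sized to a single PLT entry (shdr_plt.sh_entsize).
 */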
216 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
217 symbol_filter_t filter)
218 {
219 uint32_t nr_rel_entries, idx;
220 GElf_Sym sym;
221 u64 plt_offset;
222 GElf_Shdr shdr_plt;
223 struct symbol *f;
224 GElf_Shdr shdr_rel_plt, shdr_dynsym;
225 Elf_Data *reldata, *syms, *symstrs;
226 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
227 size_t dynsym_idx;
228 GElf_Ehdr ehdr;
229 char sympltname[1024];
230 Elf *elf;
231 int nr = 0, symidx, err = 0;
232
233 if (!ss->dynsym)
234 return 0;
235
236 elf = ss->elf;
237 ehdr = ss->ehdr;
238
239 scn_dynsym = ss->dynsym;
240 shdr_dynsym = ss->dynshdr;
241 dynsym_idx = ss->dynsym_idx;
242
243 if (scn_dynsym == NULL)
244 goto out_elf_end;
245
246 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
247 ".rela.plt", NULL);
248 if (scn_plt_rel == NULL) {
249 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
250 ".rel.plt", NULL);
251 if (scn_plt_rel == NULL)
252 goto out_elf_end;
253 }
254
255 err = -1;
256
257 if (shdr_rel_plt.sh_link != dynsym_idx)
258 goto out_elf_end;
259
260 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
261 goto out_elf_end;
262
263 /*
264 	 * Fetch the relocation section to find the indexes into the GOT
265 * and the symbols in the .dynsym they refer to.
266 */
267 reldata = elf_getdata(scn_plt_rel, NULL);
268 if (reldata == NULL)
269 goto out_elf_end;
270
271 syms = elf_getdata(scn_dynsym, NULL);
272 if (syms == NULL)
273 goto out_elf_end;
274
275 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
276 if (scn_symstrs == NULL)
277 goto out_elf_end;
278
279 symstrs = elf_getdata(scn_symstrs, NULL);
280 if (symstrs == NULL)
281 goto out_elf_end;
282
283 if (symstrs->d_size == 0)
284 goto out_elf_end;
285
286 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
287 plt_offset = shdr_plt.sh_offset;
288
289 if (shdr_rel_plt.sh_type == SHT_RELA) {
290 GElf_Rela pos_mem, *pos;
291
292 elf_section__for_each_rela(reldata, pos, pos_mem, idx,
293 nr_rel_entries) {
294 symidx = GELF_R_SYM(pos->r_info);
295 plt_offset += shdr_plt.sh_entsize;
296 gelf_getsym(syms, symidx, &sym);
297 snprintf(sympltname, sizeof(sympltname),
298 "%s@plt", elf_sym__name(&sym, symstrs));
299
300 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
301 STB_GLOBAL, sympltname);
302 if (!f)
303 goto out_elf_end;
304
305 if (filter && filter(map, f))
306 symbol__delete(f);
307 else {
308 symbols__insert(&dso->symbols[map->type], f);
309 ++nr;
310 }
311 }
312 } else if (shdr_rel_plt.sh_type == SHT_REL) {
313 GElf_Rel pos_mem, *pos;
314 elf_section__for_each_rel(reldata, pos, pos_mem, idx,
315 nr_rel_entries) {
316 symidx = GELF_R_SYM(pos->r_info);
317 plt_offset += shdr_plt.sh_entsize;
318 gelf_getsym(syms, symidx, &sym);
319 snprintf(sympltname, sizeof(sympltname),
320 "%s@plt", elf_sym__name(&sym, symstrs));
321
322 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
323 STB_GLOBAL, sympltname);
324 if (!f)
325 goto out_elf_end;
326
327 if (filter && filter(map, f))
328 symbol__delete(f);
329 else {
330 symbols__insert(&dso->symbols[map->type], f);
331 ++nr;
332 }
333 }
334 }
335
336 err = 0;
337 out_elf_end:
338 if (err == 0)
339 return nr;
340 pr_debug("%s: problems reading %s PLT info.\n",
341 __func__, dso->long_name);
342 return 0;
343 }
344
345 /*
346 * Align offset to 4 bytes as needed for note name and descriptor data.
347 */
348 #define NOTE_ALIGN(n) (((n) + 3) & -4U)
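/* e.g. NOTE_ALIGN(1) == 4, NOTE_ALIGN(4) == 4, NOTE_ALIGN(5) == 8 */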
349
350 static int elf_read_build_id(Elf *elf, void *bf, size_t size)
351 {
352 int err = -1;
353 GElf_Ehdr ehdr;
354 GElf_Shdr shdr;
355 Elf_Data *data;
356 Elf_Scn *sec;
357 Elf_Kind ek;
358 void *ptr;
359
360 if (size < BUILD_ID_SIZE)
361 goto out;
362
363 ek = elf_kind(elf);
364 if (ek != ELF_K_ELF)
365 goto out;
366
367 if (gelf_getehdr(elf, &ehdr) == NULL) {
368 pr_err("%s: cannot get elf header.\n", __func__);
369 goto out;
370 }
371
372 /*
373 	 * Check the following sections for notes:
374 * '.note.gnu.build-id'
375 * '.notes'
376 * '.note' (VDSO specific)
377 */
378 do {
379 sec = elf_section_by_name(elf, &ehdr, &shdr,
380 ".note.gnu.build-id", NULL);
381 if (sec)
382 break;
383
384 sec = elf_section_by_name(elf, &ehdr, &shdr,
385 ".notes", NULL);
386 if (sec)
387 break;
388
389 sec = elf_section_by_name(elf, &ehdr, &shdr,
390 ".note", NULL);
391 if (sec)
392 break;
393
394 return err;
395
396 } while (0);
397
398 data = elf_getdata(sec, NULL);
399 if (data == NULL)
400 goto out;
401
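	/*
	 * A note section is a sequence of entries: an Elf_Nhdr, the owner
	 * name padded to a 4-byte boundary, then the descriptor (the
	 * build-id bytes for NT_GNU_BUILD_ID notes), also padded to a
	 * 4-byte boundary.
	 */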
402 ptr = data->d_buf;
403 while (ptr < (data->d_buf + data->d_size)) {
404 GElf_Nhdr *nhdr = ptr;
405 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
406 descsz = NOTE_ALIGN(nhdr->n_descsz);
407 const char *name;
408
409 ptr += sizeof(*nhdr);
410 name = ptr;
411 ptr += namesz;
412 if (nhdr->n_type == NT_GNU_BUILD_ID &&
413 nhdr->n_namesz == sizeof("GNU")) {
414 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
415 size_t sz = min(size, descsz);
416 memcpy(bf, ptr, sz);
417 memset(bf + sz, 0, size - sz);
418 err = descsz;
419 break;
420 }
421 }
422 ptr += descsz;
423 }
424
425 out:
426 return err;
427 }
428
429 int filename__read_build_id(const char *filename, void *bf, size_t size)
430 {
431 int fd, err = -1;
432 Elf *elf;
433
434 if (size < BUILD_ID_SIZE)
435 goto out;
436
437 fd = open(filename, O_RDONLY);
438 if (fd < 0)
439 goto out;
440
441 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
442 if (elf == NULL) {
443 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
444 goto out_close;
445 }
446
447 err = elf_read_build_id(elf, bf, size);
448
449 elf_end(elf);
450 out_close:
451 close(fd);
452 out:
453 return err;
454 }
455
456 int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
457 {
458 int fd, err = -1;
459
460 if (size < BUILD_ID_SIZE)
461 goto out;
462
463 fd = open(filename, O_RDONLY);
464 if (fd < 0)
465 goto out;
466
467 while (1) {
468 char bf[BUFSIZ];
469 GElf_Nhdr nhdr;
470 size_t namesz, descsz;
471
472 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
473 break;
474
475 namesz = NOTE_ALIGN(nhdr.n_namesz);
476 descsz = NOTE_ALIGN(nhdr.n_descsz);
477 if (nhdr.n_type == NT_GNU_BUILD_ID &&
478 nhdr.n_namesz == sizeof("GNU")) {
479 if (read(fd, bf, namesz) != (ssize_t)namesz)
480 break;
481 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
482 size_t sz = min(descsz, size);
483 if (read(fd, build_id, sz) == (ssize_t)sz) {
484 memset(build_id + sz, 0, size - sz);
485 err = 0;
486 break;
487 }
488 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
489 break;
490 } else {
491 int n = namesz + descsz;
492 if (read(fd, bf, n) != n)
493 break;
494 }
495 }
496 close(fd);
497 out:
498 return err;
499 }
500
501 int filename__read_debuglink(const char *filename, char *debuglink,
502 size_t size)
503 {
504 int fd, err = -1;
505 Elf *elf;
506 GElf_Ehdr ehdr;
507 GElf_Shdr shdr;
508 Elf_Data *data;
509 Elf_Scn *sec;
510 Elf_Kind ek;
511
512 fd = open(filename, O_RDONLY);
513 if (fd < 0)
514 goto out;
515
516 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
517 if (elf == NULL) {
518 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
519 goto out_close;
520 }
521
522 ek = elf_kind(elf);
523 if (ek != ELF_K_ELF)
524 goto out_elf_end;
525
526 if (gelf_getehdr(elf, &ehdr) == NULL) {
527 pr_err("%s: cannot get elf header.\n", __func__);
528 goto out_elf_end;
529 }
530
531 sec = elf_section_by_name(elf, &ehdr, &shdr,
532 ".gnu_debuglink", NULL);
533 if (sec == NULL)
534 goto out_elf_end;
535
536 data = elf_getdata(sec, NULL);
537 if (data == NULL)
538 goto out_elf_end;
539
540 /* the start of this section is a zero-terminated string */
541 strncpy(debuglink, data->d_buf, size);
542
543 err = 0;
544
545 out_elf_end:
546 elf_end(elf);
547 out_close:
548 close(fd);
549 out:
550 return err;
551 }
552
553 static int dso__swap_init(struct dso *dso, unsigned char eidata)
554 {
555 static unsigned int const endian = 1;
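	/*
	 * Inspecting the first byte of the constant 1 reveals the host byte
	 * order: it reads as 1 on a little-endian host and 0 on a big-endian
	 * one, which is compared against the DSO's EI_DATA encoding below.
	 */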
556
557 dso->needs_swap = DSO_SWAP__NO;
558
559 switch (eidata) {
560 case ELFDATA2LSB:
561 /* We are big endian, DSO is little endian. */
562 if (*(unsigned char const *)&endian != 1)
563 dso->needs_swap = DSO_SWAP__YES;
564 break;
565
566 case ELFDATA2MSB:
567 /* We are little endian, DSO is big endian. */
568 if (*(unsigned char const *)&endian != 0)
569 dso->needs_swap = DSO_SWAP__YES;
570 break;
571
572 default:
573 pr_err("unrecognized DSO data encoding %d\n", eidata);
574 return -EINVAL;
575 }
576
577 return 0;
578 }
579
580 static int decompress_kmodule(struct dso *dso, const char *name,
581 enum dso_binary_type type)
582 {
583 int fd = -1;
584 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
585 struct kmod_path m;
586
587 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
588 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
589 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
590 return -1;
591
592 if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
593 name = dso->long_name;
594
595 if (kmod_path__parse_ext(&m, name) || !m.comp)
596 return -1;
597
598 fd = mkstemp(tmpbuf);
599 if (fd < 0) {
600 dso->load_errno = errno;
601 goto out;
602 }
603
604 if (!decompress_to_file(m.ext, name, fd)) {
605 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
606 close(fd);
607 fd = -1;
608 }
609
610 unlink(tmpbuf);
611
612 out:
613 free(m.ext);
614 return fd;
615 }
616
617 bool symsrc__possibly_runtime(struct symsrc *ss)
618 {
619 return ss->dynsym || ss->opdsec;
620 }
621
622 bool symsrc__has_symtab(struct symsrc *ss)
623 {
624 return ss->symtab != NULL;
625 }
626
627 void symsrc__destroy(struct symsrc *ss)
628 {
629 zfree(&ss->name);
630 elf_end(ss->elf);
631 close(ss->fd);
632 }
633
634 bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
635 {
636 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
637 }
638
639 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
640 enum dso_binary_type type)
641 {
642 int err = -1;
643 GElf_Ehdr ehdr;
644 Elf *elf;
645 int fd;
646
647 if (dso__needs_decompress(dso)) {
648 fd = decompress_kmodule(dso, name, type);
649 if (fd < 0)
650 return -1;
651 } else {
652 fd = open(name, O_RDONLY);
653 if (fd < 0) {
654 dso->load_errno = errno;
655 return -1;
656 }
657 }
658
659 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
660 if (elf == NULL) {
661 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
662 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
663 goto out_close;
664 }
665
666 if (gelf_getehdr(elf, &ehdr) == NULL) {
667 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
668 pr_debug("%s: cannot get elf header.\n", __func__);
669 goto out_elf_end;
670 }
671
672 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
673 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
674 goto out_elf_end;
675 }
676
677 /* Always reject images with a mismatched build-id: */
678 if (dso->has_build_id) {
679 u8 build_id[BUILD_ID_SIZE];
680
681 if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
682 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
683 goto out_elf_end;
684 }
685
686 if (!dso__build_id_equal(dso, build_id)) {
687 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
688 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
689 goto out_elf_end;
690 }
691 }
692
693 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
694
695 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
696 NULL);
697 if (ss->symshdr.sh_type != SHT_SYMTAB)
698 ss->symtab = NULL;
699
700 ss->dynsym_idx = 0;
701 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
702 &ss->dynsym_idx);
703 if (ss->dynshdr.sh_type != SHT_DYNSYM)
704 ss->dynsym = NULL;
705
706 ss->opdidx = 0;
707 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
708 &ss->opdidx);
709 if (ss->opdshdr.sh_type != SHT_PROGBITS)
710 ss->opdsec = NULL;
711
712 if (dso->kernel == DSO_TYPE_USER)
713 ss->adjust_symbols = true;
714 else
715 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
716
717 ss->name = strdup(name);
718 if (!ss->name) {
719 dso->load_errno = errno;
720 goto out_elf_end;
721 }
722
723 ss->elf = elf;
724 ss->fd = fd;
725 ss->ehdr = ehdr;
726 ss->type = type;
727
728 return 0;
729
730 out_elf_end:
731 elf_end(elf);
732 out_close:
733 close(fd);
734 return err;
735 }
736
737 /**
738  * ref_reloc_sym_not_found - check whether the kernel relocation symbol is still missing.
739 * @kmap: kernel maps and relocation reference symbol
740 *
741 * This function returns %true if we are dealing with the kernel maps and the
742 * relocation reference symbol has not yet been found. Otherwise %false is
743 * returned.
744 */
745 static bool ref_reloc_sym_not_found(struct kmap *kmap)
746 {
747 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
748 !kmap->ref_reloc_sym->unrelocated_addr;
749 }
750
751 /**
752 * ref_reloc - kernel relocation offset.
753 * @kmap: kernel maps and relocation reference symbol
754 *
755 * This function returns the offset of kernel addresses as determined by using
756 * the relocation reference symbol i.e. if the kernel has not been relocated
757 * then the return value is zero.
758 */
759 static u64 ref_reloc(struct kmap *kmap)
760 {
761 if (kmap && kmap->ref_reloc_sym &&
762 kmap->ref_reloc_sym->unrelocated_addr)
763 return kmap->ref_reloc_sym->addr -
764 kmap->ref_reloc_sym->unrelocated_addr;
765 return 0;
766 }
767
768 static bool want_demangle(bool is_kernel_sym)
769 {
770 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
771 }
772
773 void __weak arch__sym_update(struct symbol *s __maybe_unused,
774 GElf_Sym *sym __maybe_unused) { }
775
776 int dso__load_sym(struct dso *dso, struct map *map,
777 struct symsrc *syms_ss, struct symsrc *runtime_ss,
778 symbol_filter_t filter, int kmodule)
779 {
780 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
781 struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
782 struct map *curr_map = map;
783 struct dso *curr_dso = dso;
784 Elf_Data *symstrs, *secstrs;
785 uint32_t nr_syms;
786 int err = -1;
787 uint32_t idx;
788 GElf_Ehdr ehdr;
789 GElf_Shdr shdr;
790 GElf_Shdr tshdr;
791 Elf_Data *syms, *opddata = NULL;
792 GElf_Sym sym;
793 Elf_Scn *sec, *sec_strndx;
794 Elf *elf;
795 int nr = 0;
796 bool remap_kernel = false, adjust_kernel_syms = false;
797
798 if (kmap && !kmaps)
799 return -1;
800
801 dso->symtab_type = syms_ss->type;
802 dso->is_64_bit = syms_ss->is_64_bit;
803 dso->rel = syms_ss->ehdr.e_type == ET_REL;
804
805 /*
806 * Modules may already have symbols from kallsyms, but those symbols
807 * have the wrong values for the dso maps, so remove them.
808 */
809 if (kmodule && syms_ss->symtab)
810 symbols__delete(&dso->symbols[map->type]);
811
812 if (!syms_ss->symtab) {
813 /*
814 * If the vmlinux is stripped, fail so we will fall back
815 * to using kallsyms. The vmlinux runtime symbols aren't
816 * of much use.
817 */
818 if (dso->kernel)
819 goto out_elf_end;
820
821 syms_ss->symtab = syms_ss->dynsym;
822 syms_ss->symshdr = syms_ss->dynshdr;
823 }
824
825 elf = syms_ss->elf;
826 ehdr = syms_ss->ehdr;
827 sec = syms_ss->symtab;
828 shdr = syms_ss->symshdr;
829
830 if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
831 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
832
833 if (runtime_ss->opdsec)
834 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
835
836 syms = elf_getdata(sec, NULL);
837 if (syms == NULL)
838 goto out_elf_end;
839
840 sec = elf_getscn(elf, shdr.sh_link);
841 if (sec == NULL)
842 goto out_elf_end;
843
844 symstrs = elf_getdata(sec, NULL);
845 if (symstrs == NULL)
846 goto out_elf_end;
847
848 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
849 if (sec_strndx == NULL)
850 goto out_elf_end;
851
852 secstrs = elf_getdata(sec_strndx, NULL);
853 if (secstrs == NULL)
854 goto out_elf_end;
855
856 nr_syms = shdr.sh_size / shdr.sh_entsize;
857
858 memset(&sym, 0, sizeof(sym));
859
860 /*
861 * The kernel relocation symbol is needed in advance in order to adjust
862 * kernel maps correctly.
863 */
864 if (ref_reloc_sym_not_found(kmap)) {
865 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
866 const char *elf_name = elf_sym__name(&sym, symstrs);
867
868 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
869 continue;
870 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
871 map->reloc = kmap->ref_reloc_sym->addr -
872 kmap->ref_reloc_sym->unrelocated_addr;
873 break;
874 }
875 }
876
877 /*
878 * Handle any relocation of vdso necessary because older kernels
879 * attempted to prelink vdso to its virtual address.
880 */
881 if (dso__is_vdso(dso))
882 map->reloc = map->start - dso->text_offset;
883
884 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
885 /*
886 * Initial kernel and module mappings do not map to the dso. For
887 * function mappings, flag the fixups.
888 */
889 if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
890 remap_kernel = true;
891 adjust_kernel_syms = dso->adjust_symbols;
892 }
893 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
894 struct symbol *f;
895 const char *elf_name = elf_sym__name(&sym, symstrs);
896 char *demangled = NULL;
897 int is_label = elf_sym__is_label(&sym);
898 const char *section_name;
899 bool used_opd = false;
900
901 if (!is_label && !elf_sym__is_a(&sym, map->type))
902 continue;
903
904 /* Reject ARM ELF "mapping symbols": these aren't unique and
905 		 * don't identify functions, so they will confuse the profile
906 * output: */
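		/* e.g. "$a", "$d", "$t", "$x" and variants like "$d.1" are dropped. */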
907 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
908 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
909 && (elf_name[2] == '\0' || elf_name[2] == '.'))
910 continue;
911 }
912
913 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
914 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
915 u64 *opd = opddata->d_buf + offset;
916 sym.st_value = DSO__SWAP(dso, u64, *opd);
917 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
918 sym.st_value);
919 used_opd = true;
920 }
921 /*
922 * When loading symbols in a data mapping, ABS symbols (which
923 		 * have a value of SHN_ABS in their st_shndx) fail at
924 		 * elf_getscn(), and that marks the loading as a failure, so
925 * already loaded symbols cannot be fixed up.
926 *
927 * I'm not sure what should be done. Just ignore them for now.
928 * - Namhyung Kim
929 */
930 if (sym.st_shndx == SHN_ABS)
931 continue;
932
933 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
934 if (!sec)
935 goto out_elf_end;
936
937 gelf_getshdr(sec, &shdr);
938
939 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
940 continue;
941
942 section_name = elf_sec__name(&shdr, secstrs);
943
944 /* On ARM, symbols for thumb functions have 1 added to
945 * the symbol address as a flag - remove it */
946 if ((ehdr.e_machine == EM_ARM) &&
947 (map->type == MAP__FUNCTION) &&
948 (sym.st_value & 1))
949 --sym.st_value;
950
951 if (dso->kernel || kmodule) {
952 char dso_name[PATH_MAX];
953
954 /* Adjust symbol to map to file offset */
955 if (adjust_kernel_syms)
956 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
957
958 if (strcmp(section_name,
959 (curr_dso->short_name +
960 dso->short_name_len)) == 0)
961 goto new_symbol;
962
963 if (strcmp(section_name, ".text") == 0) {
964 /*
965 * The initial kernel mapping is based on
966 * kallsyms and identity maps. Overwrite it to
967 * map to the kernel dso.
968 */
969 if (remap_kernel && dso->kernel) {
970 remap_kernel = false;
971 map->start = shdr.sh_addr +
972 ref_reloc(kmap);
973 map->end = map->start + shdr.sh_size;
974 map->pgoff = shdr.sh_offset;
975 map->map_ip = map__map_ip;
976 map->unmap_ip = map__unmap_ip;
977 /* Ensure maps are correctly ordered */
978 if (kmaps) {
979 map__get(map);
980 map_groups__remove(kmaps, map);
981 map_groups__insert(kmaps, map);
982 map__put(map);
983 }
984 }
985
986 /*
987 * The initial module mapping is based on
988 * /proc/modules mapped to offset zero.
989 * Overwrite it to map to the module dso.
990 */
991 if (remap_kernel && kmodule) {
992 remap_kernel = false;
993 map->pgoff = shdr.sh_offset;
994 }
995
996 curr_map = map;
997 curr_dso = dso;
998 goto new_symbol;
999 }
1000
1001 if (!kmap)
1002 goto new_symbol;
1003
1004 snprintf(dso_name, sizeof(dso_name),
1005 "%s%s", dso->short_name, section_name);
1006
1007 curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
1008 if (curr_map == NULL) {
1009 u64 start = sym.st_value;
1010
1011 if (kmodule)
1012 start += map->start + shdr.sh_offset;
1013
1014 curr_dso = dso__new(dso_name);
1015 if (curr_dso == NULL)
1016 goto out_elf_end;
1017 curr_dso->kernel = dso->kernel;
1018 curr_dso->long_name = dso->long_name;
1019 curr_dso->long_name_len = dso->long_name_len;
1020 curr_map = map__new2(start, curr_dso,
1021 map->type);
1022 dso__put(curr_dso);
1023 if (curr_map == NULL) {
1024 goto out_elf_end;
1025 }
1026 if (adjust_kernel_syms) {
1027 curr_map->start = shdr.sh_addr +
1028 ref_reloc(kmap);
1029 curr_map->end = curr_map->start +
1030 shdr.sh_size;
1031 curr_map->pgoff = shdr.sh_offset;
1032 } else {
1033 curr_map->map_ip = identity__map_ip;
1034 curr_map->unmap_ip = identity__map_ip;
1035 }
1036 curr_dso->symtab_type = dso->symtab_type;
1037 map_groups__insert(kmaps, curr_map);
1038 /*
1039 			 * Add it before we drop the reference to curr_map,
1040 * i.e. while we still are sure to have a reference
1041 * to this DSO via curr_map->dso.
1042 */
1043 dsos__add(&map->groups->machine->dsos, curr_dso);
1044 /* kmaps already got it */
1045 map__put(curr_map);
1046 dso__set_loaded(curr_dso, map->type);
1047 } else
1048 curr_dso = curr_map->dso;
1049
1050 goto new_symbol;
1051 }
1052
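		/*
		 * Translate the symbol's virtual address into a file offset:
		 * st_value -= sh_addr - sh_offset for the section holding it.
		 */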
1053 if ((used_opd && runtime_ss->adjust_symbols)
1054 || (!used_opd && syms_ss->adjust_symbols)) {
1055 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1056 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1057 (u64)sym.st_value, (u64)shdr.sh_addr,
1058 (u64)shdr.sh_offset);
1059 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1060 }
1061 new_symbol:
1062 /*
1063 		 * We need to figure out if the object was created from C++ sources;
1064 		 * DWARF's DW_TAG_compile_unit has this, but we don't always have access
1065 * to it...
1066 */
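		/* e.g. the mangled C++ name "_ZN3foo3barEv" demangles to foo::bar */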
1067 if (want_demangle(dso->kernel || kmodule)) {
1068 int demangle_flags = DMGL_NO_OPTS;
1069 if (verbose)
1070 demangle_flags = DMGL_PARAMS | DMGL_ANSI;
1071
1072 demangled = bfd_demangle(NULL, elf_name, demangle_flags);
1073 if (demangled == NULL)
1074 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
1075 if (demangled != NULL)
1076 elf_name = demangled;
1077 }
1078 f = symbol__new(sym.st_value, sym.st_size,
1079 GELF_ST_BIND(sym.st_info), elf_name);
1080 free(demangled);
1081 if (!f)
1082 goto out_elf_end;
1083
1084 arch__sym_update(f, &sym);
1085
1086 if (filter && filter(curr_map, f))
1087 symbol__delete(f);
1088 else {
1089 symbols__insert(&curr_dso->symbols[curr_map->type], f);
1090 nr++;
1091 }
1092 }
1093
1094 /*
1095 * For misannotated, zeroed, ASM function sizes.
1096 */
1097 if (nr > 0) {
1098 if (!symbol_conf.allow_aliases)
1099 symbols__fixup_duplicate(&dso->symbols[map->type]);
1100 symbols__fixup_end(&dso->symbols[map->type]);
1101 if (kmap) {
1102 /*
1103 			 * We need to fix this up here too because we create new
1104 * maps here, for things like vsyscall sections.
1105 */
1106 __map_groups__fixup_end(kmaps, map->type);
1107 }
1108 }
1109 err = nr;
1110 out_elf_end:
1111 return err;
1112 }
1113
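/*
 * Walk the PT_LOAD program headers, calling mapfn() with the virtual
 * address, size and file offset of each executable (exe == true) or
 * readable (exe == false) segment.
 */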
1114 static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1115 {
1116 GElf_Phdr phdr;
1117 size_t i, phdrnum;
1118 int err;
1119 u64 sz;
1120
1121 if (elf_getphdrnum(elf, &phdrnum))
1122 return -1;
1123
1124 for (i = 0; i < phdrnum; i++) {
1125 if (gelf_getphdr(elf, i, &phdr) == NULL)
1126 return -1;
1127 if (phdr.p_type != PT_LOAD)
1128 continue;
1129 if (exe) {
1130 if (!(phdr.p_flags & PF_X))
1131 continue;
1132 } else {
1133 if (!(phdr.p_flags & PF_R))
1134 continue;
1135 }
1136 sz = min(phdr.p_memsz, phdr.p_filesz);
1137 if (!sz)
1138 continue;
1139 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1140 if (err)
1141 return err;
1142 }
1143 return 0;
1144 }
1145
1146 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1147 bool *is_64_bit)
1148 {
1149 int err;
1150 Elf *elf;
1151
1152 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1153 if (elf == NULL)
1154 return -1;
1155
1156 if (is_64_bit)
1157 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1158
1159 err = elf_read_maps(elf, exe, mapfn, data);
1160
1161 elf_end(elf);
1162 return err;
1163 }
1164
1165 enum dso_type dso__type_fd(int fd)
1166 {
1167 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1168 GElf_Ehdr ehdr;
1169 Elf_Kind ek;
1170 Elf *elf;
1171
1172 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1173 if (elf == NULL)
1174 goto out;
1175
1176 ek = elf_kind(elf);
1177 if (ek != ELF_K_ELF)
1178 goto out_end;
1179
1180 if (gelf_getclass(elf) == ELFCLASS64) {
1181 dso_type = DSO__TYPE_64BIT;
1182 goto out_end;
1183 }
1184
1185 if (gelf_getehdr(elf, &ehdr) == NULL)
1186 goto out_end;
1187
1188 if (ehdr.e_machine == EM_X86_64)
1189 dso_type = DSO__TYPE_X32BIT;
1190 else
1191 dso_type = DSO__TYPE_32BIT;
1192 out_end:
1193 elf_end(elf);
1194 out:
1195 return dso_type;
1196 }
1197
1198 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1199 {
1200 ssize_t r;
1201 size_t n;
1202 int err = -1;
1203 char *buf = malloc(page_size);
1204
1205 if (buf == NULL)
1206 return -1;
1207
1208 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1209 goto out;
1210
1211 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1212 goto out;
1213
1214 while (len) {
1215 n = page_size;
1216 if (len < n)
1217 n = len;
1218 /* Use read because mmap won't work on proc files */
1219 r = read(from, buf, n);
1220 if (r < 0)
1221 goto out;
1222 if (!r)
1223 break;
1224 n = r;
1225 r = write(to, buf, n);
1226 if (r < 0)
1227 goto out;
1228 if ((size_t)r != n)
1229 goto out;
1230 len -= n;
1231 }
1232
1233 err = 0;
1234 out:
1235 free(buf);
1236 return err;
1237 }
1238
1239 struct kcore {
1240 int fd;
1241 int elfclass;
1242 Elf *elf;
1243 GElf_Ehdr ehdr;
1244 };
1245
1246 static int kcore__open(struct kcore *kcore, const char *filename)
1247 {
1248 GElf_Ehdr *ehdr;
1249
1250 kcore->fd = open(filename, O_RDONLY);
1251 if (kcore->fd == -1)
1252 return -1;
1253
1254 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1255 if (!kcore->elf)
1256 goto out_close;
1257
1258 kcore->elfclass = gelf_getclass(kcore->elf);
1259 if (kcore->elfclass == ELFCLASSNONE)
1260 goto out_end;
1261
1262 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1263 if (!ehdr)
1264 goto out_end;
1265
1266 return 0;
1267
1268 out_end:
1269 elf_end(kcore->elf);
1270 out_close:
1271 close(kcore->fd);
1272 return -1;
1273 }
1274
1275 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1276 bool temp)
1277 {
1278 kcore->elfclass = elfclass;
1279
1280 if (temp)
1281 kcore->fd = mkstemp(filename);
1282 else
1283 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1284 if (kcore->fd == -1)
1285 return -1;
1286
1287 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1288 if (!kcore->elf)
1289 goto out_close;
1290
1291 if (!gelf_newehdr(kcore->elf, elfclass))
1292 goto out_end;
1293
1294 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
1295
1296 return 0;
1297
1298 out_end:
1299 elf_end(kcore->elf);
1300 out_close:
1301 close(kcore->fd);
1302 unlink(filename);
1303 return -1;
1304 }
1305
1306 static void kcore__close(struct kcore *kcore)
1307 {
1308 elf_end(kcore->elf);
1309 close(kcore->fd);
1310 }
1311
1312 static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1313 {
1314 GElf_Ehdr *ehdr = &to->ehdr;
1315 GElf_Ehdr *kehdr = &from->ehdr;
1316
1317 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1318 ehdr->e_type = kehdr->e_type;
1319 ehdr->e_machine = kehdr->e_machine;
1320 ehdr->e_version = kehdr->e_version;
1321 ehdr->e_entry = 0;
1322 ehdr->e_shoff = 0;
1323 ehdr->e_flags = kehdr->e_flags;
1324 ehdr->e_phnum = count;
1325 ehdr->e_shentsize = 0;
1326 ehdr->e_shnum = 0;
1327 ehdr->e_shstrndx = 0;
1328
1329 if (from->elfclass == ELFCLASS32) {
1330 ehdr->e_phoff = sizeof(Elf32_Ehdr);
1331 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
1332 ehdr->e_phentsize = sizeof(Elf32_Phdr);
1333 } else {
1334 ehdr->e_phoff = sizeof(Elf64_Ehdr);
1335 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
1336 ehdr->e_phentsize = sizeof(Elf64_Phdr);
1337 }
1338
1339 if (!gelf_update_ehdr(to->elf, ehdr))
1340 return -1;
1341
1342 if (!gelf_newphdr(to->elf, count))
1343 return -1;
1344
1345 return 0;
1346 }
1347
1348 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1349 u64 addr, u64 len)
1350 {
1351 GElf_Phdr phdr = {
1352 .p_type = PT_LOAD,
1353 .p_flags = PF_R | PF_W | PF_X,
1354 .p_offset = offset,
1355 .p_vaddr = addr,
1356 .p_paddr = 0,
1357 .p_filesz = len,
1358 .p_memsz = len,
1359 .p_align = page_size,
1360 };
1361
1362 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
1363 return -1;
1364
1365 return 0;
1366 }
1367
1368 static off_t kcore__write(struct kcore *kcore)
1369 {
1370 return elf_update(kcore->elf, ELF_C_WRITE);
1371 }
1372
1373 struct phdr_data {
1374 off_t offset;
1375 u64 addr;
1376 u64 len;
1377 };
1378
1379 struct kcore_copy_info {
1380 u64 stext;
1381 u64 etext;
1382 u64 first_symbol;
1383 u64 last_symbol;
1384 u64 first_module;
1385 u64 last_module_symbol;
1386 struct phdr_data kernel_map;
1387 struct phdr_data modules_map;
1388 };
1389
1390 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1391 u64 start)
1392 {
1393 struct kcore_copy_info *kci = arg;
1394
1395 if (!symbol_type__is_a(type, MAP__FUNCTION))
1396 return 0;
1397
1398 if (strchr(name, '[')) {
1399 if (start > kci->last_module_symbol)
1400 kci->last_module_symbol = start;
1401 return 0;
1402 }
1403
1404 if (!kci->first_symbol || start < kci->first_symbol)
1405 kci->first_symbol = start;
1406
1407 if (!kci->last_symbol || start > kci->last_symbol)
1408 kci->last_symbol = start;
1409
1410 if (!strcmp(name, "_stext")) {
1411 kci->stext = start;
1412 return 0;
1413 }
1414
1415 if (!strcmp(name, "_etext")) {
1416 kci->etext = start;
1417 return 0;
1418 }
1419
1420 return 0;
1421 }
1422
1423 static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1424 const char *dir)
1425 {
1426 char kallsyms_filename[PATH_MAX];
1427
1428 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1429
1430 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1431 return -1;
1432
1433 if (kallsyms__parse(kallsyms_filename, kci,
1434 kcore_copy__process_kallsyms) < 0)
1435 return -1;
1436
1437 return 0;
1438 }
1439
1440 static int kcore_copy__process_modules(void *arg,
1441 const char *name __maybe_unused,
1442 u64 start)
1443 {
1444 struct kcore_copy_info *kci = arg;
1445
1446 if (!kci->first_module || start < kci->first_module)
1447 kci->first_module = start;
1448
1449 return 0;
1450 }
1451
1452 static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1453 const char *dir)
1454 {
1455 char modules_filename[PATH_MAX];
1456
1457 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1458
1459 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1460 return -1;
1461
1462 if (modules__parse(modules_filename, kci,
1463 kcore_copy__process_modules) < 0)
1464 return -1;
1465
1466 return 0;
1467 }
1468
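/*
 * Record in *p where the address range [s, e) lands inside the PT_LOAD
 * segment [start, end) currently being walked: the segment's pgoff turns
 * the start address into a file offset, the length is clamped to the
 * segment end, and only the first matching segment is recorded.
 */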
1469 static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
1470 u64 s, u64 e)
1471 {
1472 if (p->addr || s < start || s >= end)
1473 return;
1474
1475 p->addr = s;
1476 p->offset = (s - start) + pgoff;
1477 p->len = e < end ? e - s : end - s;
1478 }
1479
1480 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1481 {
1482 struct kcore_copy_info *kci = data;
1483 u64 end = start + len;
1484
1485 kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
1486 kci->etext);
1487
1488 kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
1489 kci->last_module_symbol);
1490
1491 return 0;
1492 }
1493
1494 static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1495 {
1496 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
1497 return -1;
1498
1499 return 0;
1500 }
1501
1502 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1503 Elf *elf)
1504 {
1505 if (kcore_copy__parse_kallsyms(kci, dir))
1506 return -1;
1507
1508 if (kcore_copy__parse_modules(kci, dir))
1509 return -1;
1510
1511 if (kci->stext)
1512 kci->stext = round_down(kci->stext, page_size);
1513 else
1514 kci->stext = round_down(kci->first_symbol, page_size);
1515
1516 if (kci->etext) {
1517 kci->etext = round_up(kci->etext, page_size);
1518 } else if (kci->last_symbol) {
1519 kci->etext = round_up(kci->last_symbol, page_size);
1520 kci->etext += page_size;
1521 }
1522
1523 kci->first_module = round_down(kci->first_module, page_size);
1524
1525 if (kci->last_module_symbol) {
1526 kci->last_module_symbol = round_up(kci->last_module_symbol,
1527 page_size);
1528 kci->last_module_symbol += page_size;
1529 }
1530
1531 if (!kci->stext || !kci->etext)
1532 return -1;
1533
1534 if (kci->first_module && !kci->last_module_symbol)
1535 return -1;
1536
1537 return kcore_copy__read_maps(kci, elf);
1538 }
1539
1540 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1541 const char *name)
1542 {
1543 char from_filename[PATH_MAX];
1544 char to_filename[PATH_MAX];
1545
1546 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1547 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1548
1549 return copyfile_mode(from_filename, to_filename, 0400);
1550 }
1551
1552 static int kcore_copy__unlink(const char *dir, const char *name)
1553 {
1554 char filename[PATH_MAX];
1555
1556 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1557
1558 return unlink(filename);
1559 }
1560
1561 static int kcore_copy__compare_fds(int from, int to)
1562 {
1563 char *buf_from;
1564 char *buf_to;
1565 ssize_t ret;
1566 size_t len;
1567 int err = -1;
1568
1569 buf_from = malloc(page_size);
1570 buf_to = malloc(page_size);
1571 if (!buf_from || !buf_to)
1572 goto out;
1573
1574 while (1) {
1575 /* Use read because mmap won't work on proc files */
1576 ret = read(from, buf_from, page_size);
1577 if (ret < 0)
1578 goto out;
1579
1580 if (!ret)
1581 break;
1582
1583 len = ret;
1584
1585 if (readn(to, buf_to, len) != (int)len)
1586 goto out;
1587
1588 if (memcmp(buf_from, buf_to, len))
1589 goto out;
1590 }
1591
1592 err = 0;
1593 out:
1594 free(buf_to);
1595 free(buf_from);
1596 return err;
1597 }
1598
1599 static int kcore_copy__compare_files(const char *from_filename,
1600 const char *to_filename)
1601 {
1602 int from, to, err = -1;
1603
1604 from = open(from_filename, O_RDONLY);
1605 if (from < 0)
1606 return -1;
1607
1608 to = open(to_filename, O_RDONLY);
1609 if (to < 0)
1610 goto out_close_from;
1611
1612 err = kcore_copy__compare_fds(from, to);
1613
1614 close(to);
1615 out_close_from:
1616 close(from);
1617 return err;
1618 }
1619
1620 static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1621 const char *name)
1622 {
1623 char from_filename[PATH_MAX];
1624 char to_filename[PATH_MAX];
1625
1626 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1627 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1628
1629 return kcore_copy__compare_files(from_filename, to_filename);
1630 }
1631
1632 /**
1633 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1634 * @from_dir: from directory
1635 * @to_dir: to directory
1636 *
1637 * This function copies kallsyms, modules and kcore files from one directory to
1638 * another. kallsyms and modules are copied entirely. Only code segments are
1639 * copied from kcore. It is assumed that two segments suffice: one for the
1640 * kernel proper and one for all the modules. The code segments are determined
1641 * from kallsyms and modules files. The kernel map starts at _stext or the
1642 * lowest function symbol, and ends at _etext or the highest function symbol.
1643 * The module map starts at the lowest module address and ends at the highest
1644 * module symbol. Start addresses are rounded down to the nearest page. End
1645 * addresses are rounded up to the nearest page. An extra page is added to the
1646 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1647 * symbol too. Because it contains only code sections, the resulting kcore is
1648 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1649 * is not the same for the kernel map and the modules map. That happens because
1650 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1651 * kallsyms and modules files are compared with their copies to check that
1652 * modules have not been loaded or unloaded while the copies were taking place.
1653 *
1654 * Return: %0 on success, %-1 on failure.
1655 */
1656 int kcore_copy(const char *from_dir, const char *to_dir)
1657 {
1658 struct kcore kcore;
1659 struct kcore extract;
1660 size_t count = 2;
1661 int idx = 0, err = -1;
1662 off_t offset = page_size, sz, modules_offset = 0;
1663 struct kcore_copy_info kci = { .stext = 0, };
1664 char kcore_filename[PATH_MAX];
1665 char extract_filename[PATH_MAX];
1666
1667 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1668 return -1;
1669
1670 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
1671 goto out_unlink_kallsyms;
1672
1673 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
1674 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
1675
1676 if (kcore__open(&kcore, kcore_filename))
1677 goto out_unlink_modules;
1678
1679 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
1680 goto out_kcore_close;
1681
1682 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1683 goto out_kcore_close;
1684
1685 if (!kci.modules_map.addr)
1686 count -= 1;
1687
1688 if (kcore__copy_hdr(&kcore, &extract, count))
1689 goto out_extract_close;
1690
1691 if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
1692 kci.kernel_map.len))
1693 goto out_extract_close;
1694
1695 if (kci.modules_map.addr) {
1696 modules_offset = offset + kci.kernel_map.len;
1697 if (kcore__add_phdr(&extract, idx, modules_offset,
1698 kci.modules_map.addr, kci.modules_map.len))
1699 goto out_extract_close;
1700 }
1701
1702 sz = kcore__write(&extract);
1703 if (sz < 0 || sz > offset)
1704 goto out_extract_close;
1705
1706 if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
1707 kci.kernel_map.len))
1708 goto out_extract_close;
1709
1710 if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
1711 extract.fd, modules_offset,
1712 kci.modules_map.len))
1713 goto out_extract_close;
1714
1715 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
1716 goto out_extract_close;
1717
1718 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
1719 goto out_extract_close;
1720
1721 err = 0;
1722
1723 out_extract_close:
1724 kcore__close(&extract);
1725 if (err)
1726 unlink(extract_filename);
1727 out_kcore_close:
1728 kcore__close(&kcore);
1729 out_unlink_modules:
1730 if (err)
1731 kcore_copy__unlink(to_dir, "modules");
1732 out_unlink_kallsyms:
1733 if (err)
1734 kcore_copy__unlink(to_dir, "kallsyms");
1735
1736 return err;
1737 }
1738
1739 int kcore_extract__create(struct kcore_extract *kce)
1740 {
1741 struct kcore kcore;
1742 struct kcore extract;
1743 size_t count = 1;
1744 int idx = 0, err = -1;
1745 off_t offset = page_size, sz;
1746
1747 if (kcore__open(&kcore, kce->kcore_filename))
1748 return -1;
1749
1750 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
1751 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
1752 goto out_kcore_close;
1753
1754 if (kcore__copy_hdr(&kcore, &extract, count))
1755 goto out_extract_close;
1756
1757 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
1758 goto out_extract_close;
1759
1760 sz = kcore__write(&extract);
1761 if (sz < 0 || sz > offset)
1762 goto out_extract_close;
1763
1764 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
1765 goto out_extract_close;
1766
1767 err = 0;
1768
1769 out_extract_close:
1770 kcore__close(&extract);
1771 if (err)
1772 unlink(kce->extract_filename);
1773 out_kcore_close:
1774 kcore__close(&kcore);
1775
1776 return err;
1777 }
1778
1779 void kcore_extract__delete(struct kcore_extract *kce)
1780 {
1781 unlink(kce->extract_filename);
1782 }
1783
1784 void symbol__elf_init(void)
1785 {
1786 elf_version(EV_CURRENT);
1787 }