9 #include "demangle-java.h"
12 #include <symbol/kallsyms.h>
16 #define EM_AARCH64 183 /* ARM 64 bit */
20 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
21 extern char *cplus_demangle(const char *, int);
23 static inline char *bfd_demangle(void __maybe_unused
*v
, const char *c
, int i
)
25 return cplus_demangle(c
, i
);
29 static inline char *bfd_demangle(void __maybe_unused
*v
,
30 const char __maybe_unused
*c
,
36 #define PACKAGE 'perf'
41 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
42 static int elf_getphdrnum(Elf
*elf
, size_t *dst
)
47 ehdr
= gelf_getehdr(elf
, &gehdr
);
57 #ifndef NT_GNU_BUILD_ID
58 #define NT_GNU_BUILD_ID 3
62 * elf_symtab__for_each_symbol - iterate through all the symbols
64 * @syms: struct elf_symtab instance to iterate
66 * @sym: GElf_Sym iterator
68 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
69 for (idx = 0, gelf_getsym(syms, idx, &sym);\
71 idx++, gelf_getsym(syms, idx, &sym))
/* Return the ELF symbol type (STT_*) encoded in sym->st_info. */
static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}
79 #define STT_GNU_IFUNC 10
82 static inline int elf_sym__is_function(const GElf_Sym
*sym
)
84 return (elf_sym__type(sym
) == STT_FUNC
||
85 elf_sym__type(sym
) == STT_GNU_IFUNC
) &&
87 sym
->st_shndx
!= SHN_UNDEF
;
90 static inline bool elf_sym__is_object(const GElf_Sym
*sym
)
92 return elf_sym__type(sym
) == STT_OBJECT
&&
94 sym
->st_shndx
!= SHN_UNDEF
;
97 static inline int elf_sym__is_label(const GElf_Sym
*sym
)
99 return elf_sym__type(sym
) == STT_NOTYPE
&&
101 sym
->st_shndx
!= SHN_UNDEF
&&
102 sym
->st_shndx
!= SHN_ABS
;
105 static bool elf_sym__is_a(GElf_Sym
*sym
, enum map_type type
)
109 return elf_sym__is_function(sym
);
111 return elf_sym__is_object(sym
);
/* Resolve sym's name: st_name is an offset into the string table data. */
static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}
/* Resolve a section's name: sh_name is an offset into .shstrtab data. */
static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}
129 static inline int elf_sec__is_text(const GElf_Shdr
*shdr
,
130 const Elf_Data
*secstrs
)
132 return strstr(elf_sec__name(shdr
, secstrs
), "text") != NULL
;
135 static inline bool elf_sec__is_data(const GElf_Shdr
*shdr
,
136 const Elf_Data
*secstrs
)
138 return strstr(elf_sec__name(shdr
, secstrs
), "data") != NULL
;
141 static bool elf_sec__is_a(GElf_Shdr
*shdr
, Elf_Data
*secstrs
,
146 return elf_sec__is_text(shdr
, secstrs
);
148 return elf_sec__is_data(shdr
, secstrs
);
154 static size_t elf_addr_to_index(Elf
*elf
, GElf_Addr addr
)
160 while ((sec
= elf_nextscn(elf
, sec
)) != NULL
) {
161 gelf_getshdr(sec
, &shdr
);
163 if ((addr
>= shdr
.sh_addr
) &&
164 (addr
< (shdr
.sh_addr
+ shdr
.sh_size
)))
173 Elf_Scn
*elf_section_by_name(Elf
*elf
, GElf_Ehdr
*ep
,
174 GElf_Shdr
*shp
, const char *name
, size_t *idx
)
179 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
180 if (!elf_rawdata(elf_getscn(elf
, ep
->e_shstrndx
), NULL
))
183 while ((sec
= elf_nextscn(elf
, sec
)) != NULL
) {
186 gelf_getshdr(sec
, shp
);
187 str
= elf_strptr(elf
, ep
->e_shstrndx
, shp
->sh_name
);
188 if (str
&& !strcmp(name
, str
)) {
199 #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
200 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
202 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
204 #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
205 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
207 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
210 * We need to check if we have a .dynsym, so that we can handle the
211 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
212 * .dynsym or .symtab).
213 * And always look at the original dso, not at debuginfo packages, that
214 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
216 int dso__synthesize_plt_symbols(struct dso
*dso
, struct symsrc
*ss
, struct map
*map
,
217 symbol_filter_t filter
)
219 uint32_t nr_rel_entries
, idx
;
224 GElf_Shdr shdr_rel_plt
, shdr_dynsym
;
225 Elf_Data
*reldata
, *syms
, *symstrs
;
226 Elf_Scn
*scn_plt_rel
, *scn_symstrs
, *scn_dynsym
;
229 char sympltname
[1024];
231 int nr
= 0, symidx
, err
= 0;
239 scn_dynsym
= ss
->dynsym
;
240 shdr_dynsym
= ss
->dynshdr
;
241 dynsym_idx
= ss
->dynsym_idx
;
243 if (scn_dynsym
== NULL
)
246 scn_plt_rel
= elf_section_by_name(elf
, &ehdr
, &shdr_rel_plt
,
248 if (scn_plt_rel
== NULL
) {
249 scn_plt_rel
= elf_section_by_name(elf
, &ehdr
, &shdr_rel_plt
,
251 if (scn_plt_rel
== NULL
)
257 if (shdr_rel_plt
.sh_link
!= dynsym_idx
)
260 if (elf_section_by_name(elf
, &ehdr
, &shdr_plt
, ".plt", NULL
) == NULL
)
264 * Fetch the relocation section to find the idxes to the GOT
265 * and the symbols in the .dynsym they refer to.
267 reldata
= elf_getdata(scn_plt_rel
, NULL
);
271 syms
= elf_getdata(scn_dynsym
, NULL
);
275 scn_symstrs
= elf_getscn(elf
, shdr_dynsym
.sh_link
);
276 if (scn_symstrs
== NULL
)
279 symstrs
= elf_getdata(scn_symstrs
, NULL
);
283 if (symstrs
->d_size
== 0)
286 nr_rel_entries
= shdr_rel_plt
.sh_size
/ shdr_rel_plt
.sh_entsize
;
287 plt_offset
= shdr_plt
.sh_offset
;
289 if (shdr_rel_plt
.sh_type
== SHT_RELA
) {
290 GElf_Rela pos_mem
, *pos
;
292 elf_section__for_each_rela(reldata
, pos
, pos_mem
, idx
,
294 symidx
= GELF_R_SYM(pos
->r_info
);
295 plt_offset
+= shdr_plt
.sh_entsize
;
296 gelf_getsym(syms
, symidx
, &sym
);
297 snprintf(sympltname
, sizeof(sympltname
),
298 "%s@plt", elf_sym__name(&sym
, symstrs
));
300 f
= symbol__new(plt_offset
, shdr_plt
.sh_entsize
,
301 STB_GLOBAL
, sympltname
);
305 if (filter
&& filter(map
, f
))
308 symbols__insert(&dso
->symbols
[map
->type
], f
);
312 } else if (shdr_rel_plt
.sh_type
== SHT_REL
) {
313 GElf_Rel pos_mem
, *pos
;
314 elf_section__for_each_rel(reldata
, pos
, pos_mem
, idx
,
316 symidx
= GELF_R_SYM(pos
->r_info
);
317 plt_offset
+= shdr_plt
.sh_entsize
;
318 gelf_getsym(syms
, symidx
, &sym
);
319 snprintf(sympltname
, sizeof(sympltname
),
320 "%s@plt", elf_sym__name(&sym
, symstrs
));
322 f
= symbol__new(plt_offset
, shdr_plt
.sh_entsize
,
323 STB_GLOBAL
, sympltname
);
327 if (filter
&& filter(map
, f
))
330 symbols__insert(&dso
->symbols
[map
->type
], f
);
340 pr_debug("%s: problems reading %s PLT info.\n",
341 __func__
, dso
->long_name
);
/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 * NOTE(review): the -4U mask is an unsigned int; this assumes note sizes
 * fit in 32 bits (ELF note n_namesz/n_descsz are Elf_Word) -- confirm
 * before using on wider operands.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
350 static int elf_read_build_id(Elf
*elf
, void *bf
, size_t size
)
360 if (size
< BUILD_ID_SIZE
)
367 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
368 pr_err("%s: cannot get elf header.\n", __func__
);
373 * Check following sections for notes:
374 * '.note.gnu.build-id'
376 * '.note' (VDSO specific)
379 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
380 ".note.gnu.build-id", NULL
);
384 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
389 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
398 data
= elf_getdata(sec
, NULL
);
403 while (ptr
< (data
->d_buf
+ data
->d_size
)) {
404 GElf_Nhdr
*nhdr
= ptr
;
405 size_t namesz
= NOTE_ALIGN(nhdr
->n_namesz
),
406 descsz
= NOTE_ALIGN(nhdr
->n_descsz
);
409 ptr
+= sizeof(*nhdr
);
412 if (nhdr
->n_type
== NT_GNU_BUILD_ID
&&
413 nhdr
->n_namesz
== sizeof("GNU")) {
414 if (memcmp(name
, "GNU", sizeof("GNU")) == 0) {
415 size_t sz
= min(size
, descsz
);
417 memset(bf
+ sz
, 0, size
- sz
);
429 int filename__read_build_id(const char *filename
, void *bf
, size_t size
)
434 if (size
< BUILD_ID_SIZE
)
437 fd
= open(filename
, O_RDONLY
);
441 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
443 pr_debug2("%s: cannot read %s ELF file.\n", __func__
, filename
);
447 err
= elf_read_build_id(elf
, bf
, size
);
456 int sysfs__read_build_id(const char *filename
, void *build_id
, size_t size
)
460 if (size
< BUILD_ID_SIZE
)
463 fd
= open(filename
, O_RDONLY
);
470 size_t namesz
, descsz
;
472 if (read(fd
, &nhdr
, sizeof(nhdr
)) != sizeof(nhdr
))
475 namesz
= NOTE_ALIGN(nhdr
.n_namesz
);
476 descsz
= NOTE_ALIGN(nhdr
.n_descsz
);
477 if (nhdr
.n_type
== NT_GNU_BUILD_ID
&&
478 nhdr
.n_namesz
== sizeof("GNU")) {
479 if (read(fd
, bf
, namesz
) != (ssize_t
)namesz
)
481 if (memcmp(bf
, "GNU", sizeof("GNU")) == 0) {
482 size_t sz
= min(descsz
, size
);
483 if (read(fd
, build_id
, sz
) == (ssize_t
)sz
) {
484 memset(build_id
+ sz
, 0, size
- sz
);
488 } else if (read(fd
, bf
, descsz
) != (ssize_t
)descsz
)
491 int n
= namesz
+ descsz
;
492 if (read(fd
, bf
, n
) != n
)
501 int filename__read_debuglink(const char *filename
, char *debuglink
,
512 fd
= open(filename
, O_RDONLY
);
516 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
518 pr_debug2("%s: cannot read %s ELF file.\n", __func__
, filename
);
526 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
527 pr_err("%s: cannot get elf header.\n", __func__
);
531 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
532 ".gnu_debuglink", NULL
);
536 data
= elf_getdata(sec
, NULL
);
540 /* the start of this section is a zero-terminated string */
541 strncpy(debuglink
, data
->d_buf
, size
);
553 static int dso__swap_init(struct dso
*dso
, unsigned char eidata
)
555 static unsigned int const endian
= 1;
557 dso
->needs_swap
= DSO_SWAP__NO
;
561 /* We are big endian, DSO is little endian. */
562 if (*(unsigned char const *)&endian
!= 1)
563 dso
->needs_swap
= DSO_SWAP__YES
;
567 /* We are little endian, DSO is big endian. */
568 if (*(unsigned char const *)&endian
!= 0)
569 dso
->needs_swap
= DSO_SWAP__YES
;
573 pr_err("unrecognized DSO data encoding %d\n", eidata
);
580 static int decompress_kmodule(struct dso
*dso
, const char *name
,
581 enum dso_binary_type type
)
584 char tmpbuf
[] = "/tmp/perf-kmod-XXXXXX";
587 if (type
!= DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP
&&
588 type
!= DSO_BINARY_TYPE__GUEST_KMODULE_COMP
&&
589 type
!= DSO_BINARY_TYPE__BUILD_ID_CACHE
)
592 if (type
== DSO_BINARY_TYPE__BUILD_ID_CACHE
)
593 name
= dso
->long_name
;
595 if (kmod_path__parse_ext(&m
, name
) || !m
.comp
)
598 fd
= mkstemp(tmpbuf
);
600 dso
->load_errno
= errno
;
604 if (!decompress_to_file(m
.ext
, name
, fd
)) {
605 dso
->load_errno
= DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE
;
/*
 * A symbol source can be a runtime image if it has dynamic symbols
 * or a .opd section.
 */
bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}
/* True if the symbol source found a .symtab section. */
bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}
627 void symsrc__destroy(struct symsrc
*ss
)
/*
 * Weak default: symbol addresses need adjusting for executables and
 * relocatable objects; architectures may override this.
 */
bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
}
639 int symsrc__init(struct symsrc
*ss
, struct dso
*dso
, const char *name
,
640 enum dso_binary_type type
)
647 if (dso__needs_decompress(dso
)) {
648 fd
= decompress_kmodule(dso
, name
, type
);
652 fd
= open(name
, O_RDONLY
);
654 dso
->load_errno
= errno
;
659 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
661 pr_debug("%s: cannot read %s ELF file.\n", __func__
, name
);
662 dso
->load_errno
= DSO_LOAD_ERRNO__INVALID_ELF
;
666 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
667 dso
->load_errno
= DSO_LOAD_ERRNO__INVALID_ELF
;
668 pr_debug("%s: cannot get elf header.\n", __func__
);
672 if (dso__swap_init(dso
, ehdr
.e_ident
[EI_DATA
])) {
673 dso
->load_errno
= DSO_LOAD_ERRNO__INTERNAL_ERROR
;
677 /* Always reject images with a mismatched build-id: */
678 if (dso
->has_build_id
) {
679 u8 build_id
[BUILD_ID_SIZE
];
681 if (elf_read_build_id(elf
, build_id
, BUILD_ID_SIZE
) < 0) {
682 dso
->load_errno
= DSO_LOAD_ERRNO__CANNOT_READ_BUILDID
;
686 if (!dso__build_id_equal(dso
, build_id
)) {
687 pr_debug("%s: build id mismatch for %s.\n", __func__
, name
);
688 dso
->load_errno
= DSO_LOAD_ERRNO__MISMATCHING_BUILDID
;
693 ss
->is_64_bit
= (gelf_getclass(elf
) == ELFCLASS64
);
695 ss
->symtab
= elf_section_by_name(elf
, &ehdr
, &ss
->symshdr
, ".symtab",
697 if (ss
->symshdr
.sh_type
!= SHT_SYMTAB
)
701 ss
->dynsym
= elf_section_by_name(elf
, &ehdr
, &ss
->dynshdr
, ".dynsym",
703 if (ss
->dynshdr
.sh_type
!= SHT_DYNSYM
)
707 ss
->opdsec
= elf_section_by_name(elf
, &ehdr
, &ss
->opdshdr
, ".opd",
709 if (ss
->opdshdr
.sh_type
!= SHT_PROGBITS
)
712 if (dso
->kernel
== DSO_TYPE_USER
)
713 ss
->adjust_symbols
= true;
715 ss
->adjust_symbols
= elf__needs_adjust_symbols(ehdr
);
717 ss
->name
= strdup(name
);
719 dso
->load_errno
= errno
;
738 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
739 * @kmap: kernel maps and relocation reference symbol
741 * This function returns %true if we are dealing with the kernel maps and the
742 * relocation reference symbol has not yet been found. Otherwise %false is
745 static bool ref_reloc_sym_not_found(struct kmap
*kmap
)
747 return kmap
&& kmap
->ref_reloc_sym
&& kmap
->ref_reloc_sym
->name
&&
748 !kmap
->ref_reloc_sym
->unrelocated_addr
;
752 * ref_reloc - kernel relocation offset.
753 * @kmap: kernel maps and relocation reference symbol
755 * This function returns the offset of kernel addresses as determined by using
756 * the relocation reference symbol i.e. if the kernel has not been relocated
757 * then the return value is zero.
759 static u64
ref_reloc(struct kmap
*kmap
)
761 if (kmap
&& kmap
->ref_reloc_sym
&&
762 kmap
->ref_reloc_sym
->unrelocated_addr
)
763 return kmap
->ref_reloc_sym
->addr
-
764 kmap
->ref_reloc_sym
->unrelocated_addr
;
768 static bool want_demangle(bool is_kernel_sym
)
770 return is_kernel_sym
? symbol_conf
.demangle_kernel
: symbol_conf
.demangle
;
/* Weak no-op hook: architectures may post-process each new symbol. */
void __weak arch__sym_update(struct symbol *s __maybe_unused,
		GElf_Sym *sym __maybe_unused) { }
776 int dso__load_sym(struct dso
*dso
, struct map
*map
,
777 struct symsrc
*syms_ss
, struct symsrc
*runtime_ss
,
778 symbol_filter_t filter
, int kmodule
)
780 struct kmap
*kmap
= dso
->kernel
? map__kmap(map
) : NULL
;
781 struct map_groups
*kmaps
= kmap
? map__kmaps(map
) : NULL
;
782 struct map
*curr_map
= map
;
783 struct dso
*curr_dso
= dso
;
784 Elf_Data
*symstrs
, *secstrs
;
791 Elf_Data
*syms
, *opddata
= NULL
;
793 Elf_Scn
*sec
, *sec_strndx
;
796 bool remap_kernel
= false, adjust_kernel_syms
= false;
801 dso
->symtab_type
= syms_ss
->type
;
802 dso
->is_64_bit
= syms_ss
->is_64_bit
;
803 dso
->rel
= syms_ss
->ehdr
.e_type
== ET_REL
;
806 * Modules may already have symbols from kallsyms, but those symbols
807 * have the wrong values for the dso maps, so remove them.
809 if (kmodule
&& syms_ss
->symtab
)
810 symbols__delete(&dso
->symbols
[map
->type
]);
812 if (!syms_ss
->symtab
) {
814 * If the vmlinux is stripped, fail so we will fall back
815 * to using kallsyms. The vmlinux runtime symbols aren't
821 syms_ss
->symtab
= syms_ss
->dynsym
;
822 syms_ss
->symshdr
= syms_ss
->dynshdr
;
826 ehdr
= syms_ss
->ehdr
;
827 sec
= syms_ss
->symtab
;
828 shdr
= syms_ss
->symshdr
;
830 if (elf_section_by_name(elf
, &ehdr
, &tshdr
, ".text", NULL
))
831 dso
->text_offset
= tshdr
.sh_addr
- tshdr
.sh_offset
;
833 if (runtime_ss
->opdsec
)
834 opddata
= elf_rawdata(runtime_ss
->opdsec
, NULL
);
836 syms
= elf_getdata(sec
, NULL
);
840 sec
= elf_getscn(elf
, shdr
.sh_link
);
844 symstrs
= elf_getdata(sec
, NULL
);
848 sec_strndx
= elf_getscn(runtime_ss
->elf
, runtime_ss
->ehdr
.e_shstrndx
);
849 if (sec_strndx
== NULL
)
852 secstrs
= elf_getdata(sec_strndx
, NULL
);
856 nr_syms
= shdr
.sh_size
/ shdr
.sh_entsize
;
858 memset(&sym
, 0, sizeof(sym
));
861 * The kernel relocation symbol is needed in advance in order to adjust
862 * kernel maps correctly.
864 if (ref_reloc_sym_not_found(kmap
)) {
865 elf_symtab__for_each_symbol(syms
, nr_syms
, idx
, sym
) {
866 const char *elf_name
= elf_sym__name(&sym
, symstrs
);
868 if (strcmp(elf_name
, kmap
->ref_reloc_sym
->name
))
870 kmap
->ref_reloc_sym
->unrelocated_addr
= sym
.st_value
;
871 map
->reloc
= kmap
->ref_reloc_sym
->addr
-
872 kmap
->ref_reloc_sym
->unrelocated_addr
;
878 * Handle any relocation of vdso necessary because older kernels
879 * attempted to prelink vdso to its virtual address.
881 if (dso__is_vdso(dso
))
882 map
->reloc
= map
->start
- dso
->text_offset
;
884 dso
->adjust_symbols
= runtime_ss
->adjust_symbols
|| ref_reloc(kmap
);
886 * Initial kernel and module mappings do not map to the dso. For
887 * function mappings, flag the fixups.
889 if (map
->type
== MAP__FUNCTION
&& (dso
->kernel
|| kmodule
)) {
891 adjust_kernel_syms
= dso
->adjust_symbols
;
893 elf_symtab__for_each_symbol(syms
, nr_syms
, idx
, sym
) {
895 const char *elf_name
= elf_sym__name(&sym
, symstrs
);
896 char *demangled
= NULL
;
897 int is_label
= elf_sym__is_label(&sym
);
898 const char *section_name
;
899 bool used_opd
= false;
901 if (!is_label
&& !elf_sym__is_a(&sym
, map
->type
))
904 /* Reject ARM ELF "mapping symbols": these aren't unique and
905 * don't identify functions, so will confuse the profile
907 if (ehdr
.e_machine
== EM_ARM
|| ehdr
.e_machine
== EM_AARCH64
) {
908 if (elf_name
[0] == '$' && strchr("adtx", elf_name
[1])
909 && (elf_name
[2] == '\0' || elf_name
[2] == '.'))
913 if (runtime_ss
->opdsec
&& sym
.st_shndx
== runtime_ss
->opdidx
) {
914 u32 offset
= sym
.st_value
- syms_ss
->opdshdr
.sh_addr
;
915 u64
*opd
= opddata
->d_buf
+ offset
;
916 sym
.st_value
= DSO__SWAP(dso
, u64
, *opd
);
917 sym
.st_shndx
= elf_addr_to_index(runtime_ss
->elf
,
922 * When loading symbols in a data mapping, ABS symbols (which
923 * has a value of SHN_ABS in its st_shndx) failed at
924 * elf_getscn(). And it marks the loading as a failure so
925 * already loaded symbols cannot be fixed up.
927 * I'm not sure what should be done. Just ignore them for now.
930 if (sym
.st_shndx
== SHN_ABS
)
933 sec
= elf_getscn(runtime_ss
->elf
, sym
.st_shndx
);
937 gelf_getshdr(sec
, &shdr
);
939 if (is_label
&& !elf_sec__is_a(&shdr
, secstrs
, map
->type
))
942 section_name
= elf_sec__name(&shdr
, secstrs
);
944 /* On ARM, symbols for thumb functions have 1 added to
945 * the symbol address as a flag - remove it */
946 if ((ehdr
.e_machine
== EM_ARM
) &&
947 (map
->type
== MAP__FUNCTION
) &&
951 if (dso
->kernel
|| kmodule
) {
952 char dso_name
[PATH_MAX
];
954 /* Adjust symbol to map to file offset */
955 if (adjust_kernel_syms
)
956 sym
.st_value
-= shdr
.sh_addr
- shdr
.sh_offset
;
958 if (strcmp(section_name
,
959 (curr_dso
->short_name
+
960 dso
->short_name_len
)) == 0)
963 if (strcmp(section_name
, ".text") == 0) {
965 * The initial kernel mapping is based on
966 * kallsyms and identity maps. Overwrite it to
967 * map to the kernel dso.
969 if (remap_kernel
&& dso
->kernel
) {
970 remap_kernel
= false;
971 map
->start
= shdr
.sh_addr
+
973 map
->end
= map
->start
+ shdr
.sh_size
;
974 map
->pgoff
= shdr
.sh_offset
;
975 map
->map_ip
= map__map_ip
;
976 map
->unmap_ip
= map__unmap_ip
;
977 /* Ensure maps are correctly ordered */
980 map_groups__remove(kmaps
, map
);
981 map_groups__insert(kmaps
, map
);
987 * The initial module mapping is based on
988 * /proc/modules mapped to offset zero.
989 * Overwrite it to map to the module dso.
991 if (remap_kernel
&& kmodule
) {
992 remap_kernel
= false;
993 map
->pgoff
= shdr
.sh_offset
;
1004 snprintf(dso_name
, sizeof(dso_name
),
1005 "%s%s", dso
->short_name
, section_name
);
1007 curr_map
= map_groups__find_by_name(kmaps
, map
->type
, dso_name
);
1008 if (curr_map
== NULL
) {
1009 u64 start
= sym
.st_value
;
1012 start
+= map
->start
+ shdr
.sh_offset
;
1014 curr_dso
= dso__new(dso_name
);
1015 if (curr_dso
== NULL
)
1017 curr_dso
->kernel
= dso
->kernel
;
1018 curr_dso
->long_name
= dso
->long_name
;
1019 curr_dso
->long_name_len
= dso
->long_name_len
;
1020 curr_map
= map__new2(start
, curr_dso
,
1023 if (curr_map
== NULL
) {
1026 if (adjust_kernel_syms
) {
1027 curr_map
->start
= shdr
.sh_addr
+
1029 curr_map
->end
= curr_map
->start
+
1031 curr_map
->pgoff
= shdr
.sh_offset
;
1033 curr_map
->map_ip
= identity__map_ip
;
1034 curr_map
->unmap_ip
= identity__map_ip
;
1036 curr_dso
->symtab_type
= dso
->symtab_type
;
1037 map_groups__insert(kmaps
, curr_map
);
1039 * Add it before we drop the reference to curr_map,
1040 * i.e. while we still are sure to have a reference
1041 * to this DSO via curr_map->dso.
1043 dsos__add(&map
->groups
->machine
->dsos
, curr_dso
);
1044 /* kmaps already got it */
1046 dso__set_loaded(curr_dso
, map
->type
);
1048 curr_dso
= curr_map
->dso
;
1053 if ((used_opd
&& runtime_ss
->adjust_symbols
)
1054 || (!used_opd
&& syms_ss
->adjust_symbols
)) {
1055 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64
" "
1056 "sh_addr: %#" PRIx64
" sh_offset: %#" PRIx64
"\n", __func__
,
1057 (u64
)sym
.st_value
, (u64
)shdr
.sh_addr
,
1058 (u64
)shdr
.sh_offset
);
1059 sym
.st_value
-= shdr
.sh_addr
- shdr
.sh_offset
;
1063 * We need to figure out if the object was created from C++ sources
1064 * DWARF DW_compile_unit has this, but we don't always have access
1067 if (want_demangle(dso
->kernel
|| kmodule
)) {
1068 int demangle_flags
= DMGL_NO_OPTS
;
1070 demangle_flags
= DMGL_PARAMS
| DMGL_ANSI
;
1072 demangled
= bfd_demangle(NULL
, elf_name
, demangle_flags
);
1073 if (demangled
== NULL
)
1074 demangled
= java_demangle_sym(elf_name
, JAVA_DEMANGLE_NORET
);
1075 if (demangled
!= NULL
)
1076 elf_name
= demangled
;
1078 f
= symbol__new(sym
.st_value
, sym
.st_size
,
1079 GELF_ST_BIND(sym
.st_info
), elf_name
);
1084 arch__sym_update(f
, &sym
);
1086 if (filter
&& filter(curr_map
, f
))
1089 symbols__insert(&curr_dso
->symbols
[curr_map
->type
], f
);
1095 * For misannotated, zeroed, ASM function sizes.
1098 if (!symbol_conf
.allow_aliases
)
1099 symbols__fixup_duplicate(&dso
->symbols
[map
->type
]);
1100 symbols__fixup_end(&dso
->symbols
[map
->type
]);
1103 * We need to fixup this here too because we create new
1104 * maps here, for things like vsyscall sections.
1106 __map_groups__fixup_end(kmaps
, map
->type
);
1114 static int elf_read_maps(Elf
*elf
, bool exe
, mapfn_t mapfn
, void *data
)
1121 if (elf_getphdrnum(elf
, &phdrnum
))
1124 for (i
= 0; i
< phdrnum
; i
++) {
1125 if (gelf_getphdr(elf
, i
, &phdr
) == NULL
)
1127 if (phdr
.p_type
!= PT_LOAD
)
1130 if (!(phdr
.p_flags
& PF_X
))
1133 if (!(phdr
.p_flags
& PF_R
))
1136 sz
= min(phdr
.p_memsz
, phdr
.p_filesz
);
1139 err
= mapfn(phdr
.p_vaddr
, sz
, phdr
.p_offset
, data
);
1146 int file__read_maps(int fd
, bool exe
, mapfn_t mapfn
, void *data
,
1152 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
1157 *is_64_bit
= (gelf_getclass(elf
) == ELFCLASS64
);
1159 err
= elf_read_maps(elf
, exe
, mapfn
, data
);
1165 enum dso_type
dso__type_fd(int fd
)
1167 enum dso_type dso_type
= DSO__TYPE_UNKNOWN
;
1172 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
1177 if (ek
!= ELF_K_ELF
)
1180 if (gelf_getclass(elf
) == ELFCLASS64
) {
1181 dso_type
= DSO__TYPE_64BIT
;
1185 if (gelf_getehdr(elf
, &ehdr
) == NULL
)
1188 if (ehdr
.e_machine
== EM_X86_64
)
1189 dso_type
= DSO__TYPE_X32BIT
;
1191 dso_type
= DSO__TYPE_32BIT
;
1198 static int copy_bytes(int from
, off_t from_offs
, int to
, off_t to_offs
, u64 len
)
1203 char *buf
= malloc(page_size
);
1208 if (lseek(to
, to_offs
, SEEK_SET
) != to_offs
)
1211 if (lseek(from
, from_offs
, SEEK_SET
) != from_offs
)
1218 /* Use read because mmap won't work on proc files */
1219 r
= read(from
, buf
, n
);
1225 r
= write(to
, buf
, n
);
1246 static int kcore__open(struct kcore
*kcore
, const char *filename
)
1250 kcore
->fd
= open(filename
, O_RDONLY
);
1251 if (kcore
->fd
== -1)
1254 kcore
->elf
= elf_begin(kcore
->fd
, ELF_C_READ
, NULL
);
1258 kcore
->elfclass
= gelf_getclass(kcore
->elf
);
1259 if (kcore
->elfclass
== ELFCLASSNONE
)
1262 ehdr
= gelf_getehdr(kcore
->elf
, &kcore
->ehdr
);
1269 elf_end(kcore
->elf
);
1275 static int kcore__init(struct kcore
*kcore
, char *filename
, int elfclass
,
1278 kcore
->elfclass
= elfclass
;
1281 kcore
->fd
= mkstemp(filename
);
1283 kcore
->fd
= open(filename
, O_WRONLY
| O_CREAT
| O_EXCL
, 0400);
1284 if (kcore
->fd
== -1)
1287 kcore
->elf
= elf_begin(kcore
->fd
, ELF_C_WRITE
, NULL
);
1291 if (!gelf_newehdr(kcore
->elf
, elfclass
))
1294 memset(&kcore
->ehdr
, 0, sizeof(GElf_Ehdr
));
1299 elf_end(kcore
->elf
);
1306 static void kcore__close(struct kcore
*kcore
)
1308 elf_end(kcore
->elf
);
1312 static int kcore__copy_hdr(struct kcore
*from
, struct kcore
*to
, size_t count
)
1314 GElf_Ehdr
*ehdr
= &to
->ehdr
;
1315 GElf_Ehdr
*kehdr
= &from
->ehdr
;
1317 memcpy(ehdr
->e_ident
, kehdr
->e_ident
, EI_NIDENT
);
1318 ehdr
->e_type
= kehdr
->e_type
;
1319 ehdr
->e_machine
= kehdr
->e_machine
;
1320 ehdr
->e_version
= kehdr
->e_version
;
1323 ehdr
->e_flags
= kehdr
->e_flags
;
1324 ehdr
->e_phnum
= count
;
1325 ehdr
->e_shentsize
= 0;
1327 ehdr
->e_shstrndx
= 0;
1329 if (from
->elfclass
== ELFCLASS32
) {
1330 ehdr
->e_phoff
= sizeof(Elf32_Ehdr
);
1331 ehdr
->e_ehsize
= sizeof(Elf32_Ehdr
);
1332 ehdr
->e_phentsize
= sizeof(Elf32_Phdr
);
1334 ehdr
->e_phoff
= sizeof(Elf64_Ehdr
);
1335 ehdr
->e_ehsize
= sizeof(Elf64_Ehdr
);
1336 ehdr
->e_phentsize
= sizeof(Elf64_Phdr
);
1339 if (!gelf_update_ehdr(to
->elf
, ehdr
))
1342 if (!gelf_newphdr(to
->elf
, count
))
1348 static int kcore__add_phdr(struct kcore
*kcore
, int idx
, off_t offset
,
1353 .p_flags
= PF_R
| PF_W
| PF_X
,
1359 .p_align
= page_size
,
1362 if (!gelf_update_phdr(kcore
->elf
, idx
, &phdr
))
/*
 * Flush the constructed kcore ELF image to disk.  Returns elf_update()'s
 * result, which callers check as negative on error.
 */
static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}
1379 struct kcore_copy_info
{
1385 u64 last_module_symbol
;
1386 struct phdr_data kernel_map
;
1387 struct phdr_data modules_map
;
1390 static int kcore_copy__process_kallsyms(void *arg
, const char *name
, char type
,
1393 struct kcore_copy_info
*kci
= arg
;
1395 if (!symbol_type__is_a(type
, MAP__FUNCTION
))
1398 if (strchr(name
, '[')) {
1399 if (start
> kci
->last_module_symbol
)
1400 kci
->last_module_symbol
= start
;
1404 if (!kci
->first_symbol
|| start
< kci
->first_symbol
)
1405 kci
->first_symbol
= start
;
1407 if (!kci
->last_symbol
|| start
> kci
->last_symbol
)
1408 kci
->last_symbol
= start
;
1410 if (!strcmp(name
, "_stext")) {
1415 if (!strcmp(name
, "_etext")) {
1423 static int kcore_copy__parse_kallsyms(struct kcore_copy_info
*kci
,
1426 char kallsyms_filename
[PATH_MAX
];
1428 scnprintf(kallsyms_filename
, PATH_MAX
, "%s/kallsyms", dir
);
1430 if (symbol__restricted_filename(kallsyms_filename
, "/proc/kallsyms"))
1433 if (kallsyms__parse(kallsyms_filename
, kci
,
1434 kcore_copy__process_kallsyms
) < 0)
1440 static int kcore_copy__process_modules(void *arg
,
1441 const char *name __maybe_unused
,
1444 struct kcore_copy_info
*kci
= arg
;
1446 if (!kci
->first_module
|| start
< kci
->first_module
)
1447 kci
->first_module
= start
;
1452 static int kcore_copy__parse_modules(struct kcore_copy_info
*kci
,
1455 char modules_filename
[PATH_MAX
];
1457 scnprintf(modules_filename
, PATH_MAX
, "%s/modules", dir
);
1459 if (symbol__restricted_filename(modules_filename
, "/proc/modules"))
1462 if (modules__parse(modules_filename
, kci
,
1463 kcore_copy__process_modules
) < 0)
1469 static void kcore_copy__map(struct phdr_data
*p
, u64 start
, u64 end
, u64 pgoff
,
1472 if (p
->addr
|| s
< start
|| s
>= end
)
1476 p
->offset
= (s
- start
) + pgoff
;
1477 p
->len
= e
< end
? e
- s
: end
- s
;
1480 static int kcore_copy__read_map(u64 start
, u64 len
, u64 pgoff
, void *data
)
1482 struct kcore_copy_info
*kci
= data
;
1483 u64 end
= start
+ len
;
1485 kcore_copy__map(&kci
->kernel_map
, start
, end
, pgoff
, kci
->stext
,
1488 kcore_copy__map(&kci
->modules_map
, start
, end
, pgoff
, kci
->first_module
,
1489 kci
->last_module_symbol
);
1494 static int kcore_copy__read_maps(struct kcore_copy_info
*kci
, Elf
*elf
)
1496 if (elf_read_maps(elf
, true, kcore_copy__read_map
, kci
) < 0)
1502 static int kcore_copy__calc_maps(struct kcore_copy_info
*kci
, const char *dir
,
1505 if (kcore_copy__parse_kallsyms(kci
, dir
))
1508 if (kcore_copy__parse_modules(kci
, dir
))
1512 kci
->stext
= round_down(kci
->stext
, page_size
);
1514 kci
->stext
= round_down(kci
->first_symbol
, page_size
);
1517 kci
->etext
= round_up(kci
->etext
, page_size
);
1518 } else if (kci
->last_symbol
) {
1519 kci
->etext
= round_up(kci
->last_symbol
, page_size
);
1520 kci
->etext
+= page_size
;
1523 kci
->first_module
= round_down(kci
->first_module
, page_size
);
1525 if (kci
->last_module_symbol
) {
1526 kci
->last_module_symbol
= round_up(kci
->last_module_symbol
,
1528 kci
->last_module_symbol
+= page_size
;
1531 if (!kci
->stext
|| !kci
->etext
)
1534 if (kci
->first_module
&& !kci
->last_module_symbol
)
1537 return kcore_copy__read_maps(kci
, elf
);
/*
 * Copy file @name from directory @from_dir to directory @to_dir,
 * creating the copy with mode 0400.  Returns copyfile_mode()'s result
 * (callers treat non-zero as failure).
 */
static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}
/* Remove file @name inside directory @dir; returns unlink()'s result. */
static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}
1561 static int kcore_copy__compare_fds(int from
, int to
)
1569 buf_from
= malloc(page_size
);
1570 buf_to
= malloc(page_size
);
1571 if (!buf_from
|| !buf_to
)
1575 /* Use read because mmap won't work on proc files */
1576 ret
= read(from
, buf_from
, page_size
);
1585 if (readn(to
, buf_to
, len
) != (int)len
)
1588 if (memcmp(buf_from
, buf_to
, len
))
1599 static int kcore_copy__compare_files(const char *from_filename
,
1600 const char *to_filename
)
1602 int from
, to
, err
= -1;
1604 from
= open(from_filename
, O_RDONLY
);
1608 to
= open(to_filename
, O_RDONLY
);
1610 goto out_close_from
;
1612 err
= kcore_copy__compare_fds(from
, to
);
/*
 * Compare file @name in @from_dir with its copy in @to_dir.
 * Returns kcore_copy__compare_files()'s result (callers treat
 * non-zero as a mismatch/failure).
 */
static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}
1633 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1634 * @from_dir: from directory
1635 * @to_dir: to directory
1637 * This function copies kallsyms, modules and kcore files from one directory to
1638 * another. kallsyms and modules are copied entirely. Only code segments are
1639 * copied from kcore. It is assumed that two segments suffice: one for the
1640 * kernel proper and one for all the modules. The code segments are determined
1641 * from kallsyms and modules files. The kernel map starts at _stext or the
1642 * lowest function symbol, and ends at _etext or the highest function symbol.
1643 * The module map starts at the lowest module address and ends at the highest
1644 * module symbol. Start addresses are rounded down to the nearest page. End
1645 * addresses are rounded up to the nearest page. An extra page is added to the
1646 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1647 * symbol too. Because it contains only code sections, the resulting kcore is
1648 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1649 * is not the same for the kernel map and the modules map. That happens because
1650 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1651 * kallsyms and modules files are compared with their copies to check that
1652 * modules have not been loaded or unloaded while the copies were taking place.
1654 * Return: %0 on success, %-1 on failure.
1656 int kcore_copy(const char *from_dir
, const char *to_dir
)
1659 struct kcore extract
;
1661 int idx
= 0, err
= -1;
1662 off_t offset
= page_size
, sz
, modules_offset
= 0;
1663 struct kcore_copy_info kci
= { .stext
= 0, };
1664 char kcore_filename
[PATH_MAX
];
1665 char extract_filename
[PATH_MAX
];
1667 if (kcore_copy__copy_file(from_dir
, to_dir
, "kallsyms"))
1670 if (kcore_copy__copy_file(from_dir
, to_dir
, "modules"))
1671 goto out_unlink_kallsyms
;
1673 scnprintf(kcore_filename
, PATH_MAX
, "%s/kcore", from_dir
);
1674 scnprintf(extract_filename
, PATH_MAX
, "%s/kcore", to_dir
);
1676 if (kcore__open(&kcore
, kcore_filename
))
1677 goto out_unlink_modules
;
1679 if (kcore_copy__calc_maps(&kci
, from_dir
, kcore
.elf
))
1680 goto out_kcore_close
;
1682 if (kcore__init(&extract
, extract_filename
, kcore
.elfclass
, false))
1683 goto out_kcore_close
;
1685 if (!kci
.modules_map
.addr
)
1688 if (kcore__copy_hdr(&kcore
, &extract
, count
))
1689 goto out_extract_close
;
1691 if (kcore__add_phdr(&extract
, idx
++, offset
, kci
.kernel_map
.addr
,
1692 kci
.kernel_map
.len
))
1693 goto out_extract_close
;
1695 if (kci
.modules_map
.addr
) {
1696 modules_offset
= offset
+ kci
.kernel_map
.len
;
1697 if (kcore__add_phdr(&extract
, idx
, modules_offset
,
1698 kci
.modules_map
.addr
, kci
.modules_map
.len
))
1699 goto out_extract_close
;
1702 sz
= kcore__write(&extract
);
1703 if (sz
< 0 || sz
> offset
)
1704 goto out_extract_close
;
1706 if (copy_bytes(kcore
.fd
, kci
.kernel_map
.offset
, extract
.fd
, offset
,
1707 kci
.kernel_map
.len
))
1708 goto out_extract_close
;
1710 if (modules_offset
&& copy_bytes(kcore
.fd
, kci
.modules_map
.offset
,
1711 extract
.fd
, modules_offset
,
1712 kci
.modules_map
.len
))
1713 goto out_extract_close
;
1715 if (kcore_copy__compare_file(from_dir
, to_dir
, "modules"))
1716 goto out_extract_close
;
1718 if (kcore_copy__compare_file(from_dir
, to_dir
, "kallsyms"))
1719 goto out_extract_close
;
1724 kcore__close(&extract
);
1726 unlink(extract_filename
);
1728 kcore__close(&kcore
);
1731 kcore_copy__unlink(to_dir
, "modules");
1732 out_unlink_kallsyms
:
1734 kcore_copy__unlink(to_dir
, "kallsyms");
1739 int kcore_extract__create(struct kcore_extract
*kce
)
1742 struct kcore extract
;
1744 int idx
= 0, err
= -1;
1745 off_t offset
= page_size
, sz
;
1747 if (kcore__open(&kcore
, kce
->kcore_filename
))
1750 strcpy(kce
->extract_filename
, PERF_KCORE_EXTRACT
);
1751 if (kcore__init(&extract
, kce
->extract_filename
, kcore
.elfclass
, true))
1752 goto out_kcore_close
;
1754 if (kcore__copy_hdr(&kcore
, &extract
, count
))
1755 goto out_extract_close
;
1757 if (kcore__add_phdr(&extract
, idx
, offset
, kce
->addr
, kce
->len
))
1758 goto out_extract_close
;
1760 sz
= kcore__write(&extract
);
1761 if (sz
< 0 || sz
> offset
)
1762 goto out_extract_close
;
1764 if (copy_bytes(kcore
.fd
, kce
->offs
, extract
.fd
, offset
, kce
->len
))
1765 goto out_extract_close
;
1770 kcore__close(&extract
);
1772 unlink(kce
->extract_filename
);
1774 kcore__close(&kcore
);
1779 void kcore_extract__delete(struct kcore_extract
*kce
)
1781 unlink(kce
->extract_filename
);
/* Set the expected libelf version; required before other elf_* calls. */
void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}