15 const char *map_type__name
[MAP__NR_TYPES
] = {
16 [MAP__FUNCTION
] = "Functions",
17 [MAP__VARIABLE
] = "Variables",
/*
 * is_anon_memory - non-zero when @filename denotes an anonymous mapping
 * ("//anon" or an anon hugepage) rather than a file-backed one.
 */
static inline int is_anon_memory(const char *filename)
{
	if (strcmp(filename, "//anon") == 0)
		return 1;

	return strcmp(filename, "/anon_hugepage (deleted)") == 0;
}
/*
 * is_no_dso_memory - non-zero for pseudo-mappings with no backing DSO:
 * any "[stack...]" variant (prefix match covers "[stack:TID]") or "[heap]".
 */
static inline int is_no_dso_memory(const char *filename)
{
	if (strncmp(filename, "[stack", 6) == 0)
		return 1;

	return strcmp(filename, "[heap]") == 0;
}
/*
 * map__init - initialize an already-allocated struct map.
 *
 * NOTE(review): garbled extraction -- original line numbers are fused into
 * the text and original lines 34-39 are missing (presumably the opening
 * brace and the start/end/pgoff/dso member assignments -- confirm against
 * the pristine source before editing).
 *
 * Visible behavior: install the default map_ip/unmap_ip address-translation
 * callbacks, clear the rb-tree linkage, and reset the referenced and
 * erange_warned flags.
 */
32 void map__init(struct map
*self
, enum map_type type
,
33 u64 start
, u64 end
, u64 pgoff
, struct dso
*dso
)
40 self
->map_ip
= map__map_ip
;
41 self
->unmap_ip
= map__unmap_ip
;
42 RB_CLEAR_NODE(&self
->rb_node
);
44 self
->referenced
= false;
45 self
->erange_warned
= false;
/*
 * map__new - allocate and initialize a map for a new mmap event.
 *
 * NOTE(review): garbled extraction; the malloc NULL check, the conditions
 * guarding the anon/vdso/no_dso paths, the error-handling/goto lines and
 * the return statements are all missing from this view -- recover the
 * pristine source before editing.
 *
 * Visible behavior: classifies the mapping (anonymous / vdso / no-DSO),
 * rewrites anonymous mappings to the per-pid "/tmp/perf-%d.map" file,
 * resolves the backing dso (vdso__dso_findnew or __dsos__findnew),
 * initializes the map over [start, start + len), and for identity/no-dso
 * cases installs identity__map_ip and marks the dso loaded so later
 * map__load calls do not warn.
 */
48 struct map
*map__new(struct list_head
*dsos__list
, u64 start
, u64 len
,
49 u64 pgoff
, u32 pid
, char *filename
,
52 struct map
*self
= malloc(sizeof(*self
));
55 char newfilename
[PATH_MAX
];
57 int anon
, no_dso
, vdso
;
59 anon
= is_anon_memory(filename
);
60 vdso
= is_vdso_map(filename
);
61 no_dso
= is_no_dso_memory(filename
);
64 snprintf(newfilename
, sizeof(newfilename
), "/tmp/perf-%d.map", pid
);
65 filename
= newfilename
;
70 dso
= vdso__dso_findnew(dsos__list
);
72 dso
= __dsos__findnew(dsos__list
, filename
);
77 map__init(self
, type
, start
, start
+ len
, pgoff
, dso
);
80 self
->map_ip
= self
->unmap_ip
= identity__map_ip
;
/*
 * (original comment, retained below)
 */
83 * Set memory without DSO as loaded. All map__find_*
84 * functions still return NULL, and we avoid the
85 * unnecessary map__load warning.
88 dso__set_loaded(dso
, self
->type
);
/*
 * map__new2 - kernel/module constructor variant.
 *
 * NOTE(review): garbled extraction; the "if (map != NULL)" guard around
 * map__init and the final "return map;" appear to be missing -- confirm
 * against the pristine source.  Visible behavior: calloc a map (with
 * trailing struct kmap space when dso->kernel) and map__init it with
 * end/pgoff of 0, to be fixed up after symbol loading.
 */
98 * Constructor variant for modules (where we know from /proc/modules where
99 * they are loaded) and for vmlinux, where only after we load all the
100 * symbols we'll know where it starts and ends.
102 struct map
*map__new2(u64 start
, struct dso
*dso
, enum map_type type
)
104 struct map
*map
= calloc(1, (sizeof(*map
) +
105 (dso
->kernel
? sizeof(struct kmap
) : 0)));
108 * ->end will be filled after we load all the symbols
110 map__init(map
, type
, start
, 0, 0, dso
);
/*
 * map__delete - destructor for a map.
 * NOTE(review): only the signature survived the extraction; the body
 * (presumably a free of @self) is missing -- recover the pristine source.
 */
116 void map__delete(struct map
*self
)
/*
 * map__fixup_start - set ->start from the first (lowest) symbol of the
 * map's dso for this map type.
 * NOTE(review): garbled extraction; the guard for an empty symbol tree
 * (rb_first returning NULL) is missing from this view -- confirm before
 * editing.
 */
121 void map__fixup_start(struct map
*self
)
123 struct rb_root
*symbols
= &self
->dso
->symbols
[self
->type
];
124 struct rb_node
*nd
= rb_first(symbols
);
126 struct symbol
*sym
= rb_entry(nd
, struct symbol
, rb_node
);
127 self
->start
= sym
->start
;
/*
 * map__fixup_end - set ->end from the last (highest) symbol of the map's
 * dso for this map type; mirror image of map__fixup_start.
 * NOTE(review): garbled extraction; the empty-tree guard (rb_last NULL)
 * is missing from this view -- confirm before editing.
 */
131 void map__fixup_end(struct map
*self
)
133 struct rb_root
*symbols
= &self
->dso
->symbols
[self
->type
];
134 struct rb_node
*nd
= rb_last(symbols
);
136 struct symbol
*sym
= rb_entry(nd
, struct symbol
, rb_node
);
137 self
->end
= sym
->end
;
141 #define DSO__DELETED "(deleted)"
/*
 * map__load - load the symbols of the map's dso (once), warning helpfully
 * on failure.
 *
 * NOTE(review): heavily garbled extraction -- the early return for an
 * already-loaded dso, the "nr" declaration, the nr < 0 branch structure,
 * the sbuild_id argument to build_id__sprintf, and the final return are
 * all missing from this view.  Recover the pristine source before editing.
 *
 * Visible behavior: skip work when dso__loaded; otherwise dso__load and,
 * on failure, print a warning that includes the build id when available;
 * when zero symbols were found, detect the "(deleted)" suffix (prelink
 * replaced the file) and suggest a fix, else suggest a debug package.
 * Finally, kernel dsos get map__reloc_vmlinux applied since their symtabs
 * are absolute rather than relative.
 */
143 int map__load(struct map
*self
, symbol_filter_t filter
)
145 const char *name
= self
->dso
->long_name
;
148 if (dso__loaded(self
->dso
, self
->type
))
151 nr
= dso__load(self
->dso
, self
, filter
);
153 if (self
->dso
->has_build_id
) {
154 char sbuild_id
[BUILD_ID_SIZE
* 2 + 1];
156 build_id__sprintf(self
->dso
->build_id
,
157 sizeof(self
->dso
->build_id
),
159 pr_warning("%s with build id %s not found",
162 pr_warning("Failed to open %s", name
);
164 pr_warning(", continuing without symbols\n");
166 } else if (nr
== 0) {
167 #ifdef LIBELF_SUPPORT
168 const size_t len
= strlen(name
);
169 const size_t real_len
= len
- sizeof(DSO__DELETED
);
171 if (len
> sizeof(DSO__DELETED
) &&
172 strcmp(name
+ real_len
+ 1, DSO__DELETED
) == 0) {
173 pr_warning("%.*s was updated (is prelink enabled?). "
174 "Restart the long running apps that use it!\n",
175 (int)real_len
, name
);
177 pr_warning("no symbols found in %s, maybe install "
178 "a debug package?\n", name
);
184 * Only applies to the kernel, as its symtabs aren't relative like the
187 if (self
->dso
->kernel
)
188 map__reloc_vmlinux(self
);
/*
 * map__find_symbol - look up the symbol containing @addr in the map's dso,
 * loading symbols on demand via map__load.
 * NOTE(review): garbled extraction; the body of the failure branch
 * (presumably an early NULL return) is missing -- confirm before editing.
 */
193 struct symbol
*map__find_symbol(struct map
*self
, u64 addr
,
194 symbol_filter_t filter
)
196 if (map__load(self
, filter
) < 0)
199 return dso__find_symbol(self
->dso
, self
->type
, addr
);
/*
 * map__find_symbol_by_name - look up a symbol by name in the map's dso,
 * loading and name-sorting the symbol table on demand.
 * NOTE(review): garbled extraction; the body of the map__load failure
 * branch (presumably an early NULL return) is missing -- confirm before
 * editing.
 */
202 struct symbol
*map__find_symbol_by_name(struct map
*self
, const char *name
,
203 symbol_filter_t filter
)
205 if (map__load(self
, filter
) < 0)
208 if (!dso__sorted_by_name(self
->dso
, self
->type
))
209 dso__sort_by_name(self
->dso
, self
->type
);
211 return dso__find_symbol_by_name(self
->dso
, self
->type
, name
);
/*
 * map__clone - shallow copy of @self into a freshly malloc'ed map.
 * NOTE(review): garbled extraction; the allocation-failure check and the
 * return statement(s) are missing from this view -- confirm before editing.
 * Note the shallow memcpy: the clone shares the dso pointer with the
 * original.
 */
214 struct map
*map__clone(struct map
*self
)
216 struct map
*map
= malloc(sizeof(*self
));
221 memcpy(map
, self
, sizeof(*self
));
/*
 * map__overlap - return whether the address ranges of @l and @r intersect.
 * NOTE(review): garbled extraction; the body that runs when l starts after
 * r (presumably swapping the two so l is the lower one) and the return
 * statements are missing -- confirm before editing.  Visible logic: after
 * normalization, overlap is decided by comparing the lower map's end with
 * the higher map's start.
 */
226 int map__overlap(struct map
*l
, struct map
*r
)
228 if (l
->start
> r
->start
) {
234 if (l
->end
> r
->start
)
240 size_t map__fprintf(struct map
*self
, FILE *fp
)
242 return fprintf(fp
, " %" PRIx64
"-%" PRIx64
" %" PRIx64
" %s\n",
243 self
->start
, self
->end
, self
->pgoff
, self
->dso
->name
);
246 size_t map__fprintf_dsoname(struct map
*map
, FILE *fp
)
248 const char *dsoname
= "[unknown]";
250 if (map
&& map
->dso
&& (map
->dso
->name
|| map
->dso
->long_name
)) {
251 if (symbol_conf
.show_kernel_path
&& map
->dso
->long_name
)
252 dsoname
= map
->dso
->long_name
;
253 else if (map
->dso
->name
)
254 dsoname
= map
->dso
->name
;
257 return fprintf(fp
, "%s", dsoname
);
/*
 * map__rip_2objdump - convert a sampled rip to the address objdump expects.
 * NOTE(review): garbled extraction; the ternary's alternative branch
 * (presumably passing rip through unchanged) and the return statement are
 * missing -- confirm before editing.  Visible logic: when the dso needs
 * symbol adjustment (ET_EXEC-like), unmap the rip back to an absolute IP.
 */
261 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
262 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
264 u64
map__rip_2objdump(struct map
*map
, u64 rip
)
266 u64 addr
= map
->dso
->adjust_symbols
?
267 map
->unmap_ip(map
, rip
) : /* RIP -> IP */
272 void map_groups__init(struct map_groups
*mg
)
275 for (i
= 0; i
< MAP__NR_TYPES
; ++i
) {
276 mg
->maps
[i
] = RB_ROOT
;
277 INIT_LIST_HEAD(&mg
->removed_maps
[i
]);
/*
 * maps__delete - erase every map from the @maps rb-tree.
 * NOTE(review): garbled extraction; the loop construct (presumably
 * "while (next)") and the per-node destruction call after rb_erase are
 * missing from this view -- confirm before editing.  The iteration
 * pattern (advance via rb_next before rb_erase) is the standard safe
 * rb-tree teardown.
 */
282 static void maps__delete(struct rb_root
*maps
)
284 struct rb_node
*next
= rb_first(maps
);
287 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
289 next
= rb_next(&pos
->rb_node
);
290 rb_erase(&pos
->rb_node
, maps
);
/*
 * maps__delete_removed - drain the removed-maps list, unlinking each entry.
 * NOTE(review): garbled extraction; the pos/n iterator declarations and
 * the per-entry destruction call after list_del are missing from this
 * view -- confirm before editing.  Uses the _safe list walker because
 * entries are unlinked during iteration.
 */
295 static void maps__delete_removed(struct list_head
*maps
)
299 list_for_each_entry_safe(pos
, n
, maps
, node
) {
300 list_del(&pos
->node
);
305 void map_groups__exit(struct map_groups
*mg
)
309 for (i
= 0; i
< MAP__NR_TYPES
; ++i
) {
310 maps__delete(&mg
->maps
[i
]);
311 maps__delete_removed(&mg
->removed_maps
[i
]);
/*
 * map_groups__flush - move every live map of every type onto the
 * corresponding removed_maps list instead of freeing it, because other
 * objects (e.g. hist_entry) may still reference the maps.
 * NOTE(review): garbled extraction; the "type" declaration and the inner
 * loop construct (presumably "while (next)") are missing from this
 * view -- confirm before editing.
 */
315 void map_groups__flush(struct map_groups
*mg
)
319 for (type
= 0; type
< MAP__NR_TYPES
; type
++) {
320 struct rb_root
*root
= &mg
->maps
[type
];
321 struct rb_node
*next
= rb_first(root
);
324 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
325 next
= rb_next(&pos
->rb_node
);
326 rb_erase(&pos
->rb_node
, root
);
328 * We may have references to this map, for
329 * instance in some hist_entry instances, so
330 * just move them to a separate list.
332 list_add_tail(&pos
->node
, &mg
->removed_maps
[pos
->type
]);
/*
 * map_groups__find_symbol - resolve @addr to a symbol: find the map of
 * @type containing it, translate the address into map space via map_ip,
 * and search that map's dso.
 * NOTE(review): garbled extraction; at least one parameter line (original
 * line 339) and the NULL-map guard / fallback return are missing from
 * this view -- confirm before editing.
 */
337 struct symbol
*map_groups__find_symbol(struct map_groups
*mg
,
338 enum map_type type
, u64 addr
,
340 symbol_filter_t filter
)
342 struct map
*map
= map_groups__find(mg
, type
, addr
);
347 return map__find_symbol(map
, map
->map_ip(map
, addr
), filter
);
/*
 * map_groups__find_symbol_by_name - search every map of the given type for
 * a symbol named @name, walking the rb-tree of maps.
 * NOTE(review): garbled extraction; intermediate parameter lines
 * (presumably type/name/mapp), the "nd" declaration, the found-symbol
 * handling and the final return are missing from this view -- confirm
 * before editing.
 */
353 struct symbol
*map_groups__find_symbol_by_name(struct map_groups
*mg
,
357 symbol_filter_t filter
)
361 for (nd
= rb_first(&mg
->maps
[type
]); nd
; nd
= rb_next(nd
)) {
362 struct map
*pos
= rb_entry(nd
, struct map
, rb_node
);
363 struct symbol
*sym
= map__find_symbol_by_name(pos
, name
, filter
);
/*
 * __map_groups__fprintf_maps - print the header and every live map of one
 * type; with higher verbosity also dumps each map's dso.
 * NOTE(review): garbled extraction; the "nd" declaration, the verbosity
 * guard around the dso__fprintf lines, and the final return of the
 * accumulated count are missing from this view -- confirm before editing.
 */
375 size_t __map_groups__fprintf_maps(struct map_groups
*mg
,
376 enum map_type type
, int verbose
, FILE *fp
)
378 size_t printed
= fprintf(fp
, "%s:\n", map_type__name
[type
]);
381 for (nd
= rb_first(&mg
->maps
[type
]); nd
; nd
= rb_next(nd
)) {
382 struct map
*pos
= rb_entry(nd
, struct map
, rb_node
);
383 printed
+= fprintf(fp
, "Map:");
384 printed
+= map__fprintf(pos
, fp
);
386 printed
+= dso__fprintf(pos
->dso
, type
, fp
);
387 printed
+= fprintf(fp
, "--\n");
394 size_t map_groups__fprintf_maps(struct map_groups
*mg
, int verbose
, FILE *fp
)
396 size_t printed
= 0, i
;
397 for (i
= 0; i
< MAP__NR_TYPES
; ++i
)
398 printed
+= __map_groups__fprintf_maps(mg
, i
, verbose
, fp
);
/*
 * __map_groups__fprintf_removed_maps - print every map on the removed list
 * for one type; with higher verbosity also dumps each map's dso.
 * NOTE(review): garbled extraction; a signature line (original line 403,
 * presumably the "enum map_type type" parameter), the pos/printed
 * declarations, the verbosity guard and the final return are missing from
 * this view -- confirm before editing.
 */
402 static size_t __map_groups__fprintf_removed_maps(struct map_groups
*mg
,
404 int verbose
, FILE *fp
)
409 list_for_each_entry(pos
, &mg
->removed_maps
[type
], node
) {
410 printed
+= fprintf(fp
, "Map:");
411 printed
+= map__fprintf(pos
, fp
);
413 printed
+= dso__fprintf(pos
->dso
, type
, fp
);
414 printed
+= fprintf(fp
, "--\n");
420 static size_t map_groups__fprintf_removed_maps(struct map_groups
*mg
,
421 int verbose
, FILE *fp
)
423 size_t printed
= 0, i
;
424 for (i
= 0; i
< MAP__NR_TYPES
; ++i
)
425 printed
+= __map_groups__fprintf_removed_maps(mg
, i
, verbose
, fp
);
/*
 * map_groups__fprintf - print all live maps, a "Removed maps:" header, and
 * all removed maps of @mg to @fp; returns the total characters written.
 */
size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t total = map_groups__fprintf_maps(mg, verbose, fp);

	total += fprintf(fp, "Removed maps:\n");
	total += map_groups__fprintf_removed_maps(mg, verbose, fp);

	return total;
}
/*
 * map_groups__fixup_overlappings - make room for a newly inserted @map:
 * any existing map it overlaps is erased from the live rb-tree; the
 * non-overlapped head/tail portions of the old map are re-inserted as
 * clones, and the old map itself is parked on the removed list because
 * other objects may still reference it.
 *
 * NOTE(review): heavily garbled extraction -- the loop construct, the
 * verbosity guards around the diagnostics, the clone failure handling
 * (the "before == NULL" branch body and the "after" NULL check), the
 * continue/return statements and the final return are all missing from
 * this view.  Recover the pristine source before editing.
 */
436 int map_groups__fixup_overlappings(struct map_groups
*mg
, struct map
*map
,
437 int verbose
, FILE *fp
)
439 struct rb_root
*root
= &mg
->maps
[map
->type
];
440 struct rb_node
*next
= rb_first(root
);
444 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
445 next
= rb_next(&pos
->rb_node
);
447 if (!map__overlap(pos
, map
))
451 fputs("overlapping maps:\n", fp
);
452 map__fprintf(map
, fp
);
453 map__fprintf(pos
, fp
);
456 rb_erase(&pos
->rb_node
, root
);
458 * Now check if we need to create new maps for areas not
459 * overlapped by the new map:
461 if (map
->start
> pos
->start
) {
462 struct map
*before
= map__clone(pos
);
464 if (before
== NULL
) {
469 before
->end
= map
->start
- 1;
470 map_groups__insert(mg
, before
);
472 map__fprintf(before
, fp
);
475 if (map
->end
< pos
->end
) {
476 struct map
*after
= map__clone(pos
);
483 after
->start
= map
->end
+ 1;
484 map_groups__insert(mg
, after
);
486 map__fprintf(after
, fp
);
490 * If we have references, just move them to a separate list.
493 list_add_tail(&pos
->node
, &mg
->removed_maps
[map
->type
]);
505 * XXX This should not really _copy_ the maps, but refcount them.
/*
 * map_groups__clone - copy every map of @type from @parent into @mg by
 * cloning each rb-tree entry and inserting the clone.
 * NOTE(review): garbled extraction; the "nd" declaration, the clone
 * failure handling (presumably an error return when map__clone yields
 * NULL) and the success return are missing from this view -- confirm
 * before editing.
 */
507 int map_groups__clone(struct map_groups
*mg
,
508 struct map_groups
*parent
, enum map_type type
)
511 for (nd
= rb_first(&parent
->maps
[type
]); nd
; nd
= rb_next(nd
)) {
512 struct map
*map
= rb_entry(nd
, struct map
, rb_node
);
513 struct map
*new = map__clone(map
);
516 map_groups__insert(mg
, new);
521 static u64
map__reloc_map_ip(struct map
*map
, u64 ip
)
523 return ip
+ (s64
)map
->pgoff
;
526 static u64
map__reloc_unmap_ip(struct map
*map
, u64 ip
)
528 return ip
- (s64
)map
->pgoff
;
/*
 * map__reloc_vmlinux - compute the vmlinux relocation offset from the
 * reference relocation symbol and switch the map over to the relocating
 * map_ip/unmap_ip callbacks.
 * NOTE(review): garbled extraction; the "reloc" declaration, the early
 * return statements, the zero-reloc guard and the assignment that stores
 * reloc (presumably into ->pgoff, which the reloc callbacks read) are
 * missing from this view -- confirm before editing.
 */
531 void map__reloc_vmlinux(struct map
*self
)
533 struct kmap
*kmap
= map__kmap(self
);
536 if (!kmap
->ref_reloc_sym
|| !kmap
->ref_reloc_sym
->unrelocated_addr
)
539 reloc
= (kmap
->ref_reloc_sym
->unrelocated_addr
-
540 kmap
->ref_reloc_sym
->addr
);
545 self
->map_ip
= map__reloc_map_ip
;
546 self
->unmap_ip
= map__reloc_unmap_ip
;
/*
 * maps__insert - insert @map into the @maps rb-tree keyed by start address.
 * NOTE(review): garbled extraction; the "m" declaration, the descent loop
 * ("while (*p != NULL)") and the left/right comparison against m->start
 * are missing from this view -- confirm before editing.  The visible tail
 * is the standard rb-tree link + recolor sequence.
 */
550 void maps__insert(struct rb_root
*maps
, struct map
*map
)
552 struct rb_node
**p
= &maps
->rb_node
;
553 struct rb_node
*parent
= NULL
;
554 const u64 ip
= map
->start
;
559 m
= rb_entry(parent
, struct map
, rb_node
);
566 rb_link_node(&map
->rb_node
, parent
, p
);
567 rb_insert_color(&map
->rb_node
, maps
);
570 void maps__remove(struct rb_root
*self
, struct map
*map
)
572 rb_erase(&map
->rb_node
, self
);
/*
 * maps__find - binary-search the @maps rb-tree for the map whose
 * [start, end] range contains @ip.
 * NOTE(review): garbled extraction AND truncated at the end of this
 * chunk -- the descent loop, the left-branch comparison, and the return
 * statements are missing.  Recover the pristine source before editing.
 */
575 struct map
*maps__find(struct rb_root
*maps
, u64 ip
)
577 struct rb_node
**p
= &maps
->rb_node
;
578 struct rb_node
*parent
= NULL
;
583 m
= rb_entry(parent
, struct map
, rb_node
);
586 else if (ip
> m
->end
)
/* (extraction artifact: cgit page footer, not part of the original source) */