perf tools: Add anonymous huge page recognition
tools/perf/util/map.c
#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"
#include "thread.h"
#include "strlist.h"
#include "vdso.h"
#include "build-id.h"

const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};

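/*
 * Anonymous memory shows up in /proc/<pid>/maps as "//anon"; anon memory
 * backed by transparent huge pages shows up as "/anon_hugepage (deleted)".
 * Both are treated as anonymous so that map__new() can resolve their
 * samples via the per-process /tmp/perf-<pid>.map file.
 */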
static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon") ||
	       !strcmp(filename, "/anon_hugepage (deleted)");
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strcmp(filename, "[heap]");
}

void map__init(struct map *self, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	self->type = type;
	self->start = start;
	self->end = end;
	self->pgoff = pgoff;
	self->dso = dso;
	self->map_ip = map__map_ip;
	self->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&self->rb_node);
	self->groups = NULL;
	self->referenced = false;
	self->erange_warned = false;
}

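/*
 * Allocate a map for [start, start + len) and attach it to a DSO found or
 * created in dsos__list. Anonymous maps are pointed at the per-process
 * /tmp/perf-<pid>.map file; anonymous maps and maps with no backing DSO
 * ([stack], [heap]) use identity address translation.
 */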
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
		     u64 pgoff, u32 pid, char *filename,
		     enum map_type type)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso;

		anon = is_anon_memory(filename);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		if (vdso) {
			pgoff = 0;
			dso = vdso__dso_findnew(dsos__list);
		} else
			dso = __dsos__findnew(dsos__list, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(self, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			self->map_ip = self->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (no_dso)
				dso__set_loaded(dso, self->type);
		}
	}
	return self;
out_delete:
	free(self);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, type, start, 0, 0, dso);
	}

	return map;
}

void map__delete(struct map *self)
{
	free(self);
}

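/*
 * For maps created with map__new2() the real extent is only known once the
 * DSO's symbols are loaded: take the start of the first symbol and the end
 * of the last one.
 */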
void map__fixup_start(struct map *self)
{
	struct rb_root *symbols = &self->dso->symbols[self->type];
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		self->start = sym->start;
	}
}

void map__fixup_end(struct map *self)
{
	struct rb_root *symbols = &self->dso->symbols[self->type];
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		self->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

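/*
 * Lazily load the DSO's symbols for this map, warning when none can be
 * found (missing build id match, deleted/prelinked binary, missing debug
 * package), and relocate the symbols when the DSO is the kernel.
 */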
int map__load(struct map *self, symbol_filter_t filter)
{
	const char *name = self->dso->long_name;
	int nr;

	if (dso__loaded(self->dso, self->type))
		return 0;

	nr = dso__load(self->dso, self, filter);
	if (nr < 0) {
		if (self->dso->has_build_id) {
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(self->dso->build_id,
					  sizeof(self->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				   "Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}
	/*
	 * Only applies to the kernel, as its symtabs aren't relative like the
	 * module ones.
	 */
	if (self->dso->kernel)
		map__reloc_vmlinux(self);

	return 0;
}

struct symbol *map__find_symbol(struct map *self, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(self, filter) < 0)
		return NULL;

	return dso__find_symbol(self->dso, self->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
					symbol_filter_t filter)
{
	if (map__load(self, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(self->dso, self->type))
		dso__sort_by_name(self->dso, self->type);

	return dso__find_symbol_by_name(self->dso, self->type, name);
}

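/*
 * Shallow copy: the clone shares the original's dso and groups pointers,
 * see the XXX note at map_groups__clone() about refcounting.
 */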
struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}

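/* Two maps overlap when the one that starts lower ends past the other's start. */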
int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else if (map->dso->name)
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

/*
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	u64 addr = map->dso->adjust_symbols ?
			map->unmap_ip(map, rip) :	/* RIP -> IP */
			rip;
	return addr;
}

void map_groups__init(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i) {
		mg->maps[i] = RB_ROOT;
		INIT_LIST_HEAD(&mg->removed_maps[i]);
	}
	mg->machine = NULL;
}

static void maps__delete(struct rb_root *maps)
{
	struct rb_node *next = rb_first(maps);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, maps);
		map__delete(pos);
	}
}

static void maps__delete_removed(struct list_head *maps)
{
	struct map *pos, *n;

	list_for_each_entry_safe(pos, n, maps, node) {
		list_del(&pos->node);
		map__delete(pos);
	}
}

void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__delete(&mg->maps[i]);
		maps__delete_removed(&mg->removed_maps[i]);
	}
}

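/*
 * Empty the per-type rbtrees but keep the maps around on the removed list,
 * since hist_entry instances may still reference them.
 */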
void map_groups__flush(struct map_groups *mg)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		struct rb_root *root = &mg->maps[type];
		struct rb_node *next = rb_first(root);

		while (next) {
			struct map *pos = rb_entry(next, struct map, rb_node);
			next = rb_next(&pos->rb_node);
			rb_erase(&pos->rb_node, root);
			/*
			 * We may have references to this map, for
			 * instance in some hist_entry instances, so
			 * just move them to a separate list.
			 */
			list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
		}
	}
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	if (map != NULL) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		return sym;
	}

	return NULL;
}

size_t __map_groups__fprintf_maps(struct map_groups *mg,
				  enum map_type type, int verbose, FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
	return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
						 enum map_type type,
						 int verbose, FILE *fp)
{
	struct map *pos;
	size_t printed = 0;

	list_for_each_entry(pos, &mg->removed_maps[type], node) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}
	return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
					       int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
	printed += fprintf(fp, "Removed maps:\n");
	return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
}

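/*
 * A new map may partially cover existing ones: remove the overlapped maps
 * and re-insert clones of the non-overlapped head and/or tail pieces.
 * Overlapped maps that are still referenced are parked on the removed list
 * instead of being freed.
 */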
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   int verbose, FILE *fp)
{
	struct rb_root *root = &mg->maps[map->type];
	struct rb_node *next = rb_first(root);
	int err = 0;

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			before->end = map->start - 1;
			map_groups__insert(mg, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			after->start = map->end + 1;
			map_groups__insert(mg, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
move_map:
		/*
		 * If we have references, just move them to a separate list.
		 */
		if (pos->referenced)
			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
		else
			map__delete(pos);

		if (err)
			return err;
	}

	return 0;
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	struct rb_node *nd;
	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);
		if (new == NULL)
			return -ENOMEM;
		map_groups__insert(mg, new);
	}
	return 0;
}

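/*
 * When the ref_reloc_sym's unrelocated address differs from its current one,
 * map__reloc_vmlinux() stores the delta in ->pgoff and switches the map to
 * these helpers to translate between the two address spaces.
 */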
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void map__reloc_vmlinux(struct map *self)
{
	struct kmap *kmap = map__kmap(self);
	s64 reloc;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
		return;

	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
		 kmap->ref_reloc_sym->addr);

	if (!reloc)
		return;

	self->map_ip = map__reloc_map_ip;
	self->unmap_ip = map__reloc_unmap_ip;
	self->pgoff = reloc;
}

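/*
 * The maps rbtree is keyed by ->start; maps__find() returns the map whose
 * [start, end] range contains ip, or NULL if there is none.
 */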
void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

void maps__remove(struct rb_root *self, struct map *map)
{
	rb_erase(&map->rb_node, self);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}