tools/perf/util/map.c
#include "symbol.h"
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"

const char *map_type__name[MAP__NR_TYPES] = {
        [MAP__FUNCTION] = "Functions",
        [MAP__VARIABLE] = "Variables",
};

static inline int is_anon_memory(const char *filename)
{
        return strcmp(filename, "//anon") == 0;
}

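/*
 * Initialize an already-allocated map: record its address range, page
 * offset and backing DSO, and default to the linear map__map_ip /
 * map__unmap_ip address translation.
 */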
void map__init(struct map *self, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso)
{
        self->type = type;
        self->start = start;
        self->end = end;
        self->pgoff = pgoff;
        self->dso = dso;
        self->map_ip = map__map_ip;
        self->unmap_ip = map__unmap_ip;
        RB_CLEAR_NODE(&self->rb_node);
        self->groups = NULL;
        self->referenced = false;
}

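/*
 * Allocate and initialize a map for a mmap event. Anonymous mappings are
 * redirected to the per-pid /tmp/perf-%d.map file and, like [vdso], use
 * the identity address translation.
 */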
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                     u64 pgoff, u32 pid, char *filename,
                     enum map_type type)
{
        struct map *self = malloc(sizeof(*self));

        if (self != NULL) {
                char newfilename[PATH_MAX];
                struct dso *dso;
                int anon;

                anon = is_anon_memory(filename);

                if (anon) {
                        snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
                        filename = newfilename;
                }

                dso = __dsos__findnew(dsos__list, filename);
                if (dso == NULL)
                        goto out_delete;

                map__init(self, type, start, start + len, pgoff, dso);

                if (anon) {
set_identity:
                        self->map_ip = self->unmap_ip = identity__map_ip;
                } else if (strcmp(filename, "[vdso]") == 0) {
                        dso__set_loaded(dso, self->type);
                        goto set_identity;
                }
        }
        return self;
out_delete:
        free(self);
        return NULL;
}

void map__delete(struct map *self)
{
        free(self);
}

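/*
 * Clamp the map boundaries to the first (map__fixup_start) and last
 * (map__fixup_end) symbols in the DSO's symbol tree.
 */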
void map__fixup_start(struct map *self)
{
        struct rb_root *symbols = &self->dso->symbols[self->type];
        struct rb_node *nd = rb_first(symbols);
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
                self->start = sym->start;
        }
}

void map__fixup_end(struct map *self)
{
        struct rb_root *symbols = &self->dso->symbols[self->type];
        struct rb_node *nd = rb_last(symbols);
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
                self->end = sym->end;
        }
}

#define DSO__DELETED "(deleted)"

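/*
 * Lazily load the symbol table of this map's DSO, warning when no usable
 * symbols can be found. Returns 0 on success, -1 otherwise.
 */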
int map__load(struct map *self, symbol_filter_t filter)
{
        const char *name = self->dso->long_name;
        int nr;

        if (dso__loaded(self->dso, self->type))
                return 0;

        nr = dso__load(self->dso, self, filter);
        if (nr < 0) {
                if (self->dso->has_build_id) {
                        char sbuild_id[BUILD_ID_SIZE * 2 + 1];

                        build_id__sprintf(self->dso->build_id,
                                          sizeof(self->dso->build_id),
                                          sbuild_id);
                        pr_warning("%s with build id %s not found",
                                   name, sbuild_id);
                } else
                        pr_warning("Failed to open %s", name);

                pr_warning(", continuing without symbols\n");
                return -1;
        } else if (nr == 0) {
                const size_t len = strlen(name);
                const size_t real_len = len - sizeof(DSO__DELETED);

                if (len > sizeof(DSO__DELETED) &&
                    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
                        pr_warning("%.*s was updated, restart the long "
                                   "running apps that use it!\n",
                                   (int)real_len, name);
                } else {
                        pr_warning("no symbols found in %s, maybe install "
                                   "a debug package?\n", name);
                }

                return -1;
        }
        /*
         * Only applies to the kernel, as its symtabs aren't relative like the
         * module ones.
         */
        if (self->dso->kernel)
                map__reloc_vmlinux(self);

        return 0;
}

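/* Symbol lookup by address or name, loading the DSO's symbols on demand. */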
struct symbol *map__find_symbol(struct map *self, u64 addr,
                                symbol_filter_t filter)
{
        if (map__load(self, filter) < 0)
                return NULL;

        return dso__find_symbol(self->dso, self->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
                                        symbol_filter_t filter)
{
        if (map__load(self, filter) < 0)
                return NULL;

        if (!dso__sorted_by_name(self->dso, self->type))
                dso__sort_by_name(self->dso, self->type);

        return dso__find_symbol_by_name(self->dso, self->type, name);
}

struct map *map__clone(struct map *self)
{
        struct map *map = malloc(sizeof(*self));

        if (!map)
                return NULL;

        memcpy(map, self, sizeof(*self));

        return map;
}

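/* Return 1 if the address ranges of the two maps overlap, 0 otherwise. */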
int map__overlap(struct map *l, struct map *r)
{
        if (l->start > r->start) {
                struct map *t = l;
                l = r;
                r = t;
        }

        if (l->end > r->start)
                return 1;

        return 0;
}

size_t map__fprintf(struct map *self, FILE *fp)
{
        return fprintf(fp, " %Lx-%Lx %Lx %s\n",
                       self->start, self->end, self->pgoff, self->dso->name);
}

/*
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
        u64 addr = map->dso->adjust_symbols ?
                        map->unmap_ip(map, rip) :       /* RIP -> IP */
                        rip;
        return addr;
}

u64 map__objdump_2ip(struct map *map, u64 addr)
{
        u64 ip = map->dso->adjust_symbols ?
                        addr :
                        map->unmap_ip(map, addr);       /* RIP -> IP */
        return ip;
}

void map_groups__init(struct map_groups *self)
{
        int i;
        for (i = 0; i < MAP__NR_TYPES; ++i) {
                self->maps[i] = RB_ROOT;
                INIT_LIST_HEAD(&self->removed_maps[i]);
        }
        self->machine = NULL;
}

static void maps__delete(struct rb_root *self)
{
        struct rb_node *next = rb_first(self);

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);

                next = rb_next(&pos->rb_node);
                rb_erase(&pos->rb_node, self);
                map__delete(pos);
        }
}

static void maps__delete_removed(struct list_head *self)
{
        struct map *pos, *n;

        list_for_each_entry_safe(pos, n, self, node) {
                list_del(&pos->node);
                map__delete(pos);
        }
}

void map_groups__exit(struct map_groups *self)
{
        int i;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                maps__delete(&self->maps[i]);
                maps__delete_removed(&self->removed_maps[i]);
        }
}

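/*
 * Empty all map trees; the maps are parked on the removed_maps lists
 * because hist entries may still reference them.
 */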
void map_groups__flush(struct map_groups *self)
{
        int type;

        for (type = 0; type < MAP__NR_TYPES; type++) {
                struct rb_root *root = &self->maps[type];
                struct rb_node *next = rb_first(root);

                while (next) {
                        struct map *pos = rb_entry(next, struct map, rb_node);
                        next = rb_next(&pos->rb_node);
                        rb_erase(&pos->rb_node, root);
                        /*
                         * We may have references to this map, for
                         * instance in some hist_entry instances, so
                         * just move them to a separate list.
                         */
                        list_add_tail(&pos->node, &self->removed_maps[pos->type]);
                }
        }
}

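/*
 * Resolve addr to a symbol within this map_groups, optionally returning
 * the containing map via mapp.
 */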
struct symbol *map_groups__find_symbol(struct map_groups *self,
                                       enum map_type type, u64 addr,
                                       struct map **mapp,
                                       symbol_filter_t filter)
{
        struct map *map = map_groups__find(self, type, addr);

        if (map != NULL) {
                if (mapp != NULL)
                        *mapp = map;
                return map__find_symbol(map, map->map_ip(map, addr), filter);
        }

        return NULL;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
                                               enum map_type type,
                                               const char *name,
                                               struct map **mapp,
                                               symbol_filter_t filter)
{
        struct rb_node *nd;

        for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                struct symbol *sym = map__find_symbol_by_name(pos, name, filter);

                if (sym == NULL)
                        continue;
                if (mapp != NULL)
                        *mapp = pos;
                return sym;
        }

        return NULL;
}

size_t __map_groups__fprintf_maps(struct map_groups *self,
                                  enum map_type type, int verbose, FILE *fp)
{
        size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
        struct rb_node *nd;

        for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 2) {
                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }

        return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp)
{
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_maps(self, i, verbose, fp);
        return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
                                                 enum map_type type,
                                                 int verbose, FILE *fp)
{
        struct map *pos;
        size_t printed = 0;

        list_for_each_entry(pos, &self->removed_maps[type], node) {
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 1) {
                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }
        return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *self,
                                               int verbose, FILE *fp)
{
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp);
        return printed;
}

size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp)
{
        size_t printed = map_groups__fprintf_maps(self, verbose, fp);
        printed += fprintf(fp, "Removed maps:\n");
        return printed + map_groups__fprintf_removed_maps(self, verbose, fp);
}

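/*
 * Remove existing maps that overlap the new one, re-inserting clones for
 * the parts left uncovered; overlapped maps that are still referenced are
 * moved to removed_maps instead of being freed.
 */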
int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
                                   int verbose, FILE *fp)
{
        struct rb_root *root = &self->maps[map->type];
        struct rb_node *next = rb_first(root);
        int err = 0;

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);

                if (!map__overlap(pos, map))
                        continue;

                if (verbose >= 2) {
                        fputs("overlapping maps:\n", fp);
                        map__fprintf(map, fp);
                        map__fprintf(pos, fp);
                }

                rb_erase(&pos->rb_node, root);
                /*
                 * Now check if we need to create new maps for areas not
                 * overlapped by the new map:
                 */
                if (map->start > pos->start) {
                        struct map *before = map__clone(pos);

                        if (before == NULL) {
                                err = -ENOMEM;
                                goto move_map;
                        }

                        before->end = map->start - 1;
                        map_groups__insert(self, before);
                        if (verbose >= 2)
                                map__fprintf(before, fp);
                }

                if (map->end < pos->end) {
                        struct map *after = map__clone(pos);

                        if (after == NULL) {
                                err = -ENOMEM;
                                goto move_map;
                        }

                        after->start = map->end + 1;
                        map_groups__insert(self, after);
                        if (verbose >= 2)
                                map__fprintf(after, fp);
                }
move_map:
                /*
                 * If we have references, just move them to a separate list.
                 */
                if (pos->referenced)
                        list_add_tail(&pos->node, &self->removed_maps[map->type]);
                else
                        map__delete(pos);

                if (err)
                        return err;
        }

        return 0;
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *self,
                      struct map_groups *parent, enum map_type type)
{
        struct rb_node *nd;
        for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
                struct map *map = rb_entry(nd, struct map, rb_node);
                struct map *new = map__clone(map);
                if (new == NULL)
                        return -ENOMEM;
                map_groups__insert(self, new);
        }
        return 0;
}

static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
        return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
        return ip - (s64)map->pgoff;
}

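/*
 * If the running kernel was relocated relative to vmlinux (the reference
 * symbol's addresses differ), switch to pgoff-based address translation
 * using the relocation delta.
 */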
void map__reloc_vmlinux(struct map *self)
{
        struct kmap *kmap = map__kmap(self);
        s64 reloc;

        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
                return;

        reloc = (kmap->ref_reloc_sym->unrelocated_addr -
                 kmap->ref_reloc_sym->addr);

        if (!reloc)
                return;

        self->map_ip = map__reloc_map_ip;
        self->unmap_ip = map__reloc_unmap_ip;
        self->pgoff = reloc;
}

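/* Insert a map into the rb tree, keyed by its start address. */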
void maps__insert(struct rb_root *maps, struct map *map)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        const u64 ip = map->start;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&map->rb_node, parent, p);
        rb_insert_color(&map->rb_node, maps);
}

void maps__remove(struct rb_root *self, struct map *map)
{
        rb_erase(&map->rb_node, self);
}

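/* Find the map whose [start, end] range contains ip, or NULL if none does. */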
struct map *maps__find(struct rb_root *maps, u64 ip)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else if (ip > m->end)
                        p = &(*p)->rb_right;
                else
                        return m;
        }

        return NULL;
}

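/* Set up a machine: kernel map groups, DSO lists, pid and root directory. */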
int machine__init(struct machine *self, const char *root_dir, pid_t pid)
{
        map_groups__init(&self->kmaps);
        RB_CLEAR_NODE(&self->rb_node);
        INIT_LIST_HEAD(&self->user_dsos);
        INIT_LIST_HEAD(&self->kernel_dsos);

        self->kmaps.machine = self;
        self->pid = pid;
        self->root_dir = strdup(root_dir);
        return self->root_dir == NULL ? -ENOMEM : 0;
}

static void dsos__delete(struct list_head *self)
{
        struct dso *pos, *n;

        list_for_each_entry_safe(pos, n, self, node) {
                list_del(&pos->node);
                dso__delete(pos);
        }
}

void machine__exit(struct machine *self)
{
        map_groups__exit(&self->kmaps);
        dsos__delete(&self->user_dsos);
        dsos__delete(&self->kernel_dsos);
        free(self->root_dir);
        self->root_dir = NULL;
}

void machine__delete(struct machine *self)
{
        machine__exit(self);
        free(self);
}

struct machine *machines__add(struct rb_root *self, pid_t pid,
                              const char *root_dir)
{
        struct rb_node **p = &self->rb_node;
        struct rb_node *parent = NULL;
        struct machine *pos, *machine = malloc(sizeof(*machine));

        if (!machine)
                return NULL;

        if (machine__init(machine, root_dir, pid) != 0) {
                free(machine);
                return NULL;
        }

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct machine, rb_node);
                if (pid < pos->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&machine->rb_node, parent, p);
        rb_insert_color(&machine->rb_node, self);

        return machine;
}

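/*
 * Look up a machine by pid; when there is no exact match, fall back to a
 * machine registered with pid 0, if one was seen during the walk.
 */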
struct machine *machines__find(struct rb_root *self, pid_t pid)
{
        struct rb_node **p = &self->rb_node;
        struct rb_node *parent = NULL;
        struct machine *machine;
        struct machine *default_machine = NULL;

        while (*p != NULL) {
                parent = *p;
                machine = rb_entry(parent, struct machine, rb_node);
                if (pid < machine->pid)
                        p = &(*p)->rb_left;
                else if (pid > machine->pid)
                        p = &(*p)->rb_right;
                else
                        return machine;
                if (!machine->pid)
                        default_machine = machine;
        }

        return default_machine;
}

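/*
 * Find or create the machine for this pid, deriving the guest root
 * directory from symbol_conf.guestmount for non-host kernel ids.
 */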
struct machine *machines__findnew(struct rb_root *self, pid_t pid)
{
        char path[PATH_MAX];
        const char *root_dir;
        struct machine *machine = machines__find(self, pid);

        if (!machine || machine->pid != pid) {
                if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
                        root_dir = "";
                else {
                        if (!symbol_conf.guestmount)
                                goto out;
                        sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
                        if (access(path, R_OK)) {
                                pr_err("Can't access file %s\n", path);
                                goto out;
                        }
                        root_dir = path;
                }
                machine = machines__add(self, pid, root_dir);
        }

out:
        return machine;
}

void machines__process(struct rb_root *self, machine__process_t process, void *data)
{
        struct rb_node *nd;

        for (nd = rb_first(self); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                process(pos, data);
        }
}

char *machine__mmap_name(struct machine *self, char *bf, size_t size)
{
        if (machine__is_host(self))
                snprintf(bf, size, "[%s]", "kernel.kallsyms");
        else if (machine__is_default_guest(self))
                snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
        else
                snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);

        return bf;
}