1 // SPDX-License-Identifier: GPL-2.0
4 #include <linux/zalloc.h>
14 struct rb_node rb_node;
/*
 * Walk every map in ascending start-address order. NOTE(review): caller is
 * presumably expected to hold maps' lock — confirm against lock discipline
 * of the callers elsewhere in this file.
 */
18 #define maps__for_each_entry(maps, map) \
19 for (map = maps__first(maps); map; map = map_rb_node__next(map))
/*
 * Like maps__for_each_entry() but safe against erasing the current node:
 * the successor is fetched before the body may remove 'map'.
 */
21 #define maps__for_each_entry_safe(maps, map, next) \
22 for (map = maps__first(maps), next = map_rb_node__next(map); map; \
23 map = next, next = map_rb_node__next(map))
/* Accessor: rb-tree root inside the refcount-checked (RC_CHK) wrapper. */
25 static struct rb_root *maps__entries(struct maps *maps)
27 return &RC_CHK_ACCESS(maps)->entries;
/* Accessor: rw_semaphore that serializes readers/writers of this maps. */
30 static struct rw_semaphore *maps__lock(struct maps *maps)
32 return &RC_CHK_ACCESS(maps)->lock;
/* Accessor: lazily built, name-sorted array mirror of the rb-tree (NULL until first by-name lookup). */
35 static struct map **maps__maps_by_name(struct maps *maps)
37 return RC_CHK_ACCESS(maps)->maps_by_name;
/* Lowest-start map node, or NULL when the tree is empty (rb_first semantics). */
40 static struct map_rb_node *maps__first(struct maps *maps)
42 struct rb_node *first = rb_first(maps__entries(maps));
45 return rb_entry(first, struct map_rb_node, rb_node);
/* In-order successor of 'node', or NULL at the end of the tree.
 * NOTE(review): elided lines presumably NULL-check 'node' — the _safe
 * iterator above calls this with a possibly-NULL first node.
 */
49 static struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
56 next = rb_next(&node->rb_node);
61 return rb_entry(next, struct map_rb_node, rb_node);
/*
 * Linear scan for the rb-node wrapping 'map'. Identity is compared on the
 * unwrapped pointers (RC_CHK_ACCESS) so distinct refcount-check shells
 * around the same map still match.
 */
64 static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
66 struct map_rb_node *rb_node;
68 maps__for_each_entry(maps, rb_node) {
69 if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
/*
 * Initialize a freshly allocated maps: refcount of 1, empty tree, no
 * by-name mirror and no cached name-search result.
 */
75 static void maps__init(struct maps *maps, struct machine *machine)
77 refcount_set(maps__refcnt(maps), 1);
78 init_rwsem(maps__lock(maps));
79 RC_CHK_ACCESS(maps)->entries = RB_ROOT;
80 RC_CHK_ACCESS(maps)->machine = machine;
81 RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
82 RC_CHK_ACCESS(maps)->nr_maps = 0;
83 RC_CHK_ACCESS(maps)->maps_by_name = NULL;
/*
 * Tear down the by-name mirror: drop the reference each slot holds, free
 * the array (zfree NULLs the pointer) and reset the allocation size so a
 * later by-name search rebuilds it from the rb-tree.
 */
86 static void __maps__free_maps_by_name(struct maps *maps)
89 * Free everything to try to do it from the rbtree in the next search
91 for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
92 map__put(maps__maps_by_name(maps)[i]);
94 zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
95 RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
/*
 * Insert 'map' into the rb-tree keyed by start address; takes its own
 * reference on the map. Caller must hold the maps lock for writing.
 * NOTE(review): elided lines presumably handle malloc failure and the
 * descend loop's left/right branching — confirm in the full file.
 */
98 static int __maps__insert(struct maps *maps, struct map *map)
100 struct rb_node **p = &maps__entries(maps)->rb_node;
101 struct rb_node *parent = NULL;
102 const u64 ip = map__start(map);
103 struct map_rb_node *m, *new_rb_node;
105 new_rb_node = malloc(sizeof(*new_rb_node));
109 RB_CLEAR_NODE(&new_rb_node->rb_node);
110 new_rb_node->map = map__get(map);
114 m = rb_entry(parent, struct map_rb_node, rb_node);
115 if (ip < map__start(m->map))
121 rb_link_node(&new_rb_node->rb_node, parent, p);
122 rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
/*
 * Public insert: takes the write lock, inserts into the rb-tree, bumps
 * nr_maps, wires kernel-map bookkeeping for kernel DSOs, and — if the
 * by-name mirror already exists — grows it (doubling) and re-sorts so
 * by-name lookups stay consistent. On mirror realloc failure the mirror
 * is freed and rebuilt lazily later.
 */
126 int maps__insert(struct maps *maps, struct map *map)
129 const struct dso *dso = map__dso(map);
131 down_write(maps__lock(maps));
132 err = __maps__insert(maps, map);
136 ++RC_CHK_ACCESS(maps)->nr_maps;
138 if (dso && dso->kernel) {
139 struct kmap *kmap = map__kmap(map);
144 pr_err("Internal error: kernel dso with non kernel map\n");
149 * If we already performed some search by name, then we need to add the just
150 * inserted map and resort.
152 if (maps__maps_by_name(maps)) {
153 if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
154 int nr_allocate = maps__nr_maps(maps) * 2;
155 struct map **maps_by_name = realloc(maps__maps_by_name(maps),
156 nr_allocate * sizeof(map));
158 if (maps_by_name == NULL) {
159 __maps__free_maps_by_name(maps);
164 RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
165 RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
167 maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
168 __maps__sort_by_name(maps);
171 up_write(maps__lock(maps));
/* Unlink one node from the tree and drop its map reference; lock held by caller. */
175 static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
177 rb_erase_init(&rb_node->rb_node, maps__entries(maps));
178 map__put(rb_node->map);
/*
 * Remove 'map' under the write lock. Invalidates the cached by-name
 * search result if it pointed at this map, and drops the whole by-name
 * mirror (it would otherwise hold a stale entry). The assert documents
 * the invariant that the map being removed is present in the tree.
 */
182 void maps__remove(struct maps *maps, struct map *map)
184 struct map_rb_node *rb_node;
186 down_write(maps__lock(maps));
187 if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
188 RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
190 rb_node = maps__find_node(maps, map);
191 assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
192 __maps__remove(maps, rb_node);
193 if (maps__maps_by_name(maps))
194 __maps__free_maps_by_name(maps);
195 --RC_CHK_ACCESS(maps)->nr_maps;
196 up_write(maps__lock(maps));
/*
 * Free every entry: first the by-name mirror, then each rb-node via the
 * removal-safe iterator. NOTE(review): elided lines presumably put the
 * map reference and free each node — confirm in the full file.
 */
199 static void __maps__purge(struct maps *maps)
201 struct map_rb_node *pos, *next;
203 if (maps__maps_by_name(maps))
204 __maps__free_maps_by_name(maps);
206 maps__for_each_entry_safe(maps, pos, next) {
207 rb_erase_init(&pos->rb_node, maps__entries(maps));
/* Purge all entries under the write lock (elided line presumably calls __maps__purge()). */
213 static void maps__exit(struct maps *maps)
215 down_write(maps__lock(maps));
217 up_write(maps__lock(maps));
/* True when the tree has no entries. NOTE(review): reads without the lock. */
220 bool maps__empty(struct maps *maps)
222 return !maps__first(maps);
/*
 * Allocate a zeroed maps, wrap it in the refcount-check shell and
 * initialize it. Returns NULL on allocation failure (elided).
 */
225 struct maps *maps__new(struct machine *machine)
228 RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));
230 if (ADD_RC_CHK(result, maps))
231 maps__init(result, machine);
/* Final destruction once the refcount hits zero: tear down unwind state (exit/free elided). */
236 static void maps__delete(struct maps *maps)
239 unwind__finish_access(maps);
/* Grab a reference: new RC_CHK shell plus refcount increment; NULL-safe via RC_CHK_GET. */
243 struct maps *maps__get(struct maps *maps)
247 if (RC_CHK_GET(result, maps))
248 refcount_inc(maps__refcnt(maps));
/* Drop a reference; deletes the maps when the count reaches zero. NULL is a no-op. */
253 void maps__put(struct maps *maps)
255 if (maps && refcount_dec_and_test(maps__refcnt(maps)))
/*
 * Invoke 'cb' on every map under the read lock. NOTE(review): elided
 * lines presumably break out early on a non-zero callback return and
 * propagate it — confirm in the full file.
 */
261 int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
263 struct map_rb_node *pos;
266 down_read(maps__lock(maps));
267 maps__for_each_entry(maps, pos) {
268 ret = cb(pos->map, data);
272 up_read(maps__lock(maps));
/*
 * Remove every map for which 'cb' returns true, under the write lock.
 * If anything was removed the by-name mirror is dropped wholesale rather
 * than patched (it is rebuilt lazily on the next by-name search).
 */
276 void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
278 struct map_rb_node *pos, *next;
279 unsigned int start_nr_maps;
281 down_write(maps__lock(maps));
283 start_nr_maps = maps__nr_maps(maps);
284 maps__for_each_entry_safe(maps, pos, next) {
285 if (cb(pos->map, data)) {
286 __maps__remove(maps, pos);
287 --RC_CHK_ACCESS(maps)->nr_maps;
290 if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps))
291 __maps__free_maps_by_name(maps);
293 up_write(maps__lock(maps));
/*
 * Find the map covering 'addr', force-load its symbol table, then look
 * the symbol up by map-relative address. 'mapp' out-parameter handling
 * is in the elided lines.
 */
296 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
298 struct map *map = maps__find(maps, addr);
300 /* Ensure map is loaded before using map->map_ip */
301 if (map != NULL && map__load(map) >= 0) {
304 return map__find_symbol(map, map__map_ip(map, addr));
/* Callback payload for maps__find_symbol_by_name() (members elided in this view). */
310 struct maps__find_symbol_by_name_args {
/*
 * Per-map callback: look the name up in this map's symbols, reject hits
 * whose address falls outside the map, and on success hand back a new
 * reference to the containing map via args->mapp.
 */
316 static int maps__find_symbol_by_name_cb(struct map *map, void *data)
318 struct maps__find_symbol_by_name_args *args = data;
320 args->sym = map__find_symbol_by_name(map, args->name);
324 if (!map__contains_symbol(map, args->sym)) {
329 if (args->mapp != NULL)
330 *args->mapp = map__get(map);
/* Search every map for a symbol named 'name'; thin wrapper over maps__for_each_map(). */
334 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
336 struct maps__find_symbol_by_name_args args = {
342 maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
/*
 * Resolve ams->addr to a map-relative address and symbol. If the current
 * ams->ms.map no longer covers the address, re-find the owning map first.
 * Returns 0 on success, -1 when no symbol (or no map) is found.
 */
346 int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
348 if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
351 ams->ms.map = maps__find(maps, ams->addr);
352 if (ams->ms.map == NULL)
356 ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
357 ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
359 return ams->ms.sym ? 0 : -1;
/* Callback payload for maps__fprintf(): output stream plus running byte count (members elided). */
362 struct maps__fprintf_args {
/* Per-map printer: "Map:" header, the map, its DSO, then a "--" separator; accumulates bytes printed. */
367 static int maps__fprintf_cb(struct map *map, void *data)
369 struct maps__fprintf_args *args = data;
371 args->printed += fprintf(args->fp, "Map:");
372 args->printed += map__fprintf(map, args->fp);
374 args->printed += dso__fprintf(map__dso(map), args->fp);
375 args->printed += fprintf(args->fp, "--\n");
/* Dump every map to 'fp'; returns total bytes printed (accumulated in args, return elided). */
380 size_t maps__fprintf(struct maps *maps, FILE *fp)
382 struct maps__fprintf_args args = {
387 maps__for_each_map(maps, maps__fprintf_cb, &args);
/*
 * Binary-search descent: remember a candidate whose end is past the new
 * map's start, stop once its start is <= the new start (i.e. the first
 * possible overlap), else keep narrowing. Caller must hold the lock.
 */
393 * Find first map where end > map->start.
394 * Same as find_vma() in kernel.
396 static struct rb_node *first_ending_after(struct maps *maps, const struct map *map)
398 struct rb_root *root;
399 struct rb_node *next, *first;
401 root = maps__entries(maps);
402 next = root->rb_node;
405 struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
407 if (map__end(pos->map) > map__start(map)) {
409 if (map__start(pos->map) <= map__start(map))
411 next = next->rb_left;
413 next = next->rb_right;
/*
 * For each existing map overlapping 'new' (walked in start order from the
 * first candidate): erase it, then re-insert clones trimmed to the
 * non-overlapped regions before and/or after 'new'. The pgoff of the
 * "after" clone is adjusted so address translation is preserved (checked
 * by the assert). Finally 'new' itself is inserted. Debug output goes to
 * debug_file() when verbose and not in the TUI browser.
 */
419 * Adds new to maps, if new overlaps existing entries then the existing maps are
420 * adjusted or removed so that new fits without overlapping any entries.
422 int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
425 struct rb_node *next;
427 FILE *fp = debug_file();
429 down_write(maps__lock(maps));
431 next = first_ending_after(maps, new);
432 while (next && !err) {
433 struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
434 next = rb_next(&pos->rb_node);
437 * Stop if current map starts after map->end.
438 * Maps are ordered by start: next will not overlap for sure.
440 if (map__start(pos->map) >= map__end(new))
446 pr_debug("overlapping maps in %s (disable tui for more info)\n",
447 map__dso(new)->name);
449 pr_debug("overlapping maps:\n");
450 map__fprintf(new, fp);
451 map__fprintf(pos->map, fp);
455 rb_erase_init(&pos->rb_node, maps__entries(maps));
457 * Now check if we need to create new maps for areas not
458 * overlapped by the new map:
460 if (map__start(new) > map__start(pos->map)) {
461 struct map *before = map__clone(pos->map);
463 if (before == NULL) {
468 map__set_end(before, map__start(new));
469 err = __maps__insert(maps, before);
475 if (verbose >= 2 && !use_browser)
476 map__fprintf(before, fp);
480 if (map__end(new) < map__end(pos->map)) {
481 struct map *after = map__clone(pos->map);
488 map__set_start(after, map__end(new));
489 map__add_pgoff(after, map__end(new) - map__start(pos->map));
490 assert(map__map_ip(pos->map, map__end(new)) ==
491 map__map_ip(after, map__end(new)));
492 err = __maps__insert(maps, after);
497 if (verbose >= 2 && !use_browser)
498 map__fprintf(after, fp);
506 err = __maps__insert(maps, new);
507 up_write(maps__lock(maps));
/*
 * Clone every map of 'parent' into 'maps' (e.g. on fork), preparing
 * unwind access for each clone before inserting. Holds the parent's read
 * lock for the duration; error/cleanup paths are elided in this view.
 */
511 int maps__copy_from(struct maps *maps, struct maps *parent)
514 struct map_rb_node *rb_node;
516 down_read(maps__lock(parent));
518 maps__for_each_entry(parent, rb_node) {
519 struct map *new = map__clone(rb_node->map);
526 err = unwind__prepare_access(maps, new, NULL);
530 err = maps__insert(maps, new);
539 up_read(maps__lock(parent));
/*
 * Binary search for the map whose [start, end) range contains 'ip';
 * descend left when ip < start, right when ip >= end, else hit.
 * Returns the map (no reference taken here) or NULL, under the read lock.
 */
543 struct map *maps__find(struct maps *maps, u64 ip)
546 struct map_rb_node *m;
549 down_read(maps__lock(maps));
551 p = maps__entries(maps)->rb_node;
553 m = rb_entry(p, struct map_rb_node, rb_node);
554 if (ip < map__start(m->map))
556 else if (ip >= map__end(m->map))
564 up_read(maps__lock(maps));
565 return m ? m->map : NULL;
/*
 * qsort comparator on DSO short name, with start address and finally the
 * pointer value as tie-breakers so equal-named but distinct maps get a
 * stable, total order (helps reference-count bookkeeping).
 */
568 static int map__strcmp(const void *a, const void *b)
570 const struct map *map_a = *(const struct map **)a;
571 const struct map *map_b = *(const struct map **)b;
572 const struct dso *dso_a = map__dso(map_a);
573 const struct dso *dso_b = map__dso(map_b);
574 int ret = strcmp(dso_a->short_name, dso_b->short_name);
576 if (ret == 0 && map_a != map_b) {
578 * Ensure distinct but name equal maps have an order in part to
579 * aid reference counting.
581 ret = (int)map__start(map_a) - (int)map__start(map_b);
583 ret = (int)((intptr_t)map_a - (intptr_t)map_b);
/* bsearch comparator: key is a plain C string, element is a struct map *. */
589 static int map__strcmp_name(const void *name, const void *b)
591 const struct dso *dso = map__dso(*(const struct map **)b);
593 return strcmp(name, dso->short_name);
/* Sort the by-name mirror with map__strcmp(); caller must hold the lock. */
596 void __maps__sort_by_name(struct maps *maps)
598 qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
/*
 * Build the by-name mirror from the rb-tree: allocate the array, upgrade
 * the read lock to a write lock (dropped and re-taken — NOTE(review):
 * tree may change in the gap; confirm callers tolerate that), fill it
 * with referenced maps, sort, then downgrade back to the read lock.
 */
601 static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
603 struct map_rb_node *rb_node;
604 struct map **maps_by_name = realloc(maps__maps_by_name(maps),
605 maps__nr_maps(maps) * sizeof(struct map *));
608 if (maps_by_name == NULL)
611 up_read(maps__lock(maps));
612 down_write(maps__lock(maps));
614 RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
615 RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
617 maps__for_each_entry(maps, rb_node)
618 maps_by_name[i++] = map__get(rb_node->map);
620 __maps__sort_by_name(maps);
622 up_write(maps__lock(maps));
623 down_read(maps__lock(maps));
/*
 * By-name lookup on the sorted mirror, building it first if absent.
 * Returns NULL when the build fails or bsearch misses (elided).
 */
628 static struct map *__maps__find_by_name(struct maps *maps, const char *name)
632 if (maps__maps_by_name(maps) == NULL &&
633 map__groups__sort_by_name_from_rbtree(maps))
636 mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
637 sizeof(*mapp), map__strcmp_name);
/*
 * Three-stage by-name lookup under the read lock:
 *  1. the single-entry cache 'last_search_by_name',
 *  2. the sorted by-name mirror (authoritative when it exists: a miss
 *     there is a definitive miss),
 *  3. fallback linear rb-tree walk, caching any hit.
 */
643 struct map *maps__find_by_name(struct maps *maps, const char *name)
645 struct map_rb_node *rb_node;
648 down_read(maps__lock(maps));
651 if (RC_CHK_ACCESS(maps)->last_search_by_name) {
652 const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
654 if (strcmp(dso->short_name, name) == 0) {
655 map = RC_CHK_ACCESS(maps)->last_search_by_name;
660 * If we have maps->maps_by_name, then the name isn't in the rbtree,
661 * as maps->maps_by_name mirrors the rbtree when lookups by name are
664 map = __maps__find_by_name(maps, name);
665 if (map || maps__maps_by_name(maps) != NULL)
668 /* Fallback to traversing the rbtree... */
669 maps__for_each_entry(maps, rb_node) {
674 if (strcmp(dso->short_name, name) == 0) {
675 RC_CHK_ACCESS(maps)->last_search_by_name = map;
682 up_read(maps__lock(maps));
/*
 * Map following 'map' in start-address order, or NULL at the end.
 * NOTE(review): no lock taken in the visible lines — presumably the
 * caller serializes; confirm.
 */
686 struct map *maps__find_next_entry(struct maps *maps, struct map *map)
688 struct map_rb_node *rb_node = maps__find_node(maps, map);
689 struct map_rb_node *next = map_rb_node__next(rb_node);
/*
 * Clamp each map's end to the next map's start when the end is unset (0)
 * or overlaps the successor; the final map with an unset end is extended
 * to ~0ULL since the true size isn't known yet. Write lock held.
 */
697 void maps__fixup_end(struct maps *maps)
699 struct map_rb_node *prev = NULL, *curr;
701 down_write(maps__lock(maps));
703 maps__for_each_entry(maps, curr) {
704 if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map)))
705 map__set_end(prev->map, map__start(curr->map));
711 * We still haven't the actual symbols, so guess the
712 * last map final address.
714 if (curr && !map__end(curr->map))
715 map__set_end(curr->map, ~0ULL);
717 up_write(maps__lock(maps));
/*
 * Merge 'new_map' into the kernel maps: if it overlaps nothing, a plain
 * insert suffices. Otherwise 'new_map' is carved around each overlapping
 * old map — trimmed, or split into pieces collected on the 'merged' list
 * with pgoff adjusted to keep address translation intact — and the pieces
 * plus any remaining tail are inserted at the end. The overlap pre-check
 * only holds the read lock briefly; NOTE(review): the subsequent walk's
 * locking is in elided lines — confirm in the full file.
 */
721 * Merges map into maps by splitting the new map within the existing map
724 int maps__merge_in(struct maps *kmaps, struct map *new_map)
726 struct map_rb_node *rb_node;
727 struct rb_node *first;
732 down_read(maps__lock(kmaps));
733 first = first_ending_after(kmaps, new_map);
734 rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL;
735 overlaps = rb_node && map__start(rb_node->map) < map__end(new_map);
736 up_read(maps__lock(kmaps));
739 return maps__insert(kmaps, new_map);
741 maps__for_each_entry(kmaps, rb_node) {
742 struct map *old_map = rb_node->map;
744 /* no overload with this one */
745 if (map__end(new_map) < map__start(old_map) ||
746 map__start(new_map) >= map__end(old_map))
749 if (map__start(new_map) < map__start(old_map)) {
754 if (map__end(new_map) < map__end(old_map)) {
756 * |new......| -> |new..|
757 * |old....| -> |old....|
759 map__set_end(new_map, map__start(old_map));
762 * |new.............| -> |new..| |new..|
763 * |old....| -> |old....|
765 struct map_list_node *m = map_list_node__new();
772 m->map = map__clone(new_map);
779 map__set_end(m->map, map__start(old_map));
780 list_add_tail(&m->node, &merged);
781 map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
782 map__set_start(new_map, map__end(old_map));
789 if (map__end(new_map) < map__end(old_map)) {
792 * |old.........| -> |old.........|
799 * |new......| -> |new...|
800 * |old....| -> |old....|
802 map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
803 map__set_start(new_map, map__end(old_map));
809 while (!list_empty(&merged)) {
810 struct map_list_node *old_node;
812 old_node = list_entry(merged.next, struct map_list_node, node);
813 list_del_init(&old_node->node);
815 err = maps__insert(kmaps, old_node->map);
816 map__put(old_node->map);
822 err = maps__insert(kmaps, new_map);
/* Load the symbol table of the lowest-start map, under the read lock. */
828 void maps__load_first(struct maps *maps)
830 struct map_rb_node *first;
832 down_read(maps__lock(maps));
834 first = maps__first(maps);
836 map__load(first->map);
838 up_read(maps__lock(maps));