1 // SPDX-License-Identifier: GPL-2.0
4 #include <linux/zalloc.h>
13 #include <internal/rc_check.h>
16 * Locking/sorting note:
18 * Sorting is done with the write lock, iteration and binary searching happens
19 * under the read lock requiring being sorted. There is a race between sorting
20 * releasing the write lock and acquiring the read lock for iteration/searching
21 * where another thread could insert and break the sorting of the maps. In
22 * practice inserting maps should be rare meaning that the race shouldn't lead
23 * to live lock. Removal of maps doesn't break being sorted.
/*
 * A reference-count-checked container of maps, sorted lazily by address
 * and (optionally) by dso name. All fields are guarded by @lock.
 */
DECLARE_RC_STRUCT(maps) {
	/** @lock: rw_semaphore serializing access to all fields below. */
	struct rw_semaphore lock;
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	struct map **maps_by_address;
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	struct map **maps_by_name;
	/* NOTE(review): back-pointer; ownership lies with the machine — confirm. */
	struct machine *machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
	unsigned int nr_maps_allocated;
	 * @last_search_by_name_idx: cache of last found by name entry's index
	 * as frequent searches for the same dso name are common.
	unsigned int last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool maps_by_name_sorted;
	/** @ends_broken: does the map contain a map where end values are unset/unsorted? */
/*
 * Debug-build sanity checks over both arrays: per-map well-formedness,
 * reference counts, and ordering when the sorted flags claim it.
 */
static void check_invariants(const struct maps *maps __maybe_unused)
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);
		/* Kernel maps must point back at this container via their kmap. */
		if (map__dso(map) && map__dso(map)->kernel)
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps by name maps should be in maps_by_address, so
			 * the reference count should be higher.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
116 static struct map **maps__maps_by_address(const struct maps *maps)
118 return RC_CHK_ACCESS(maps)->maps_by_address;
121 static void maps__set_maps_by_address(struct maps *maps, struct map **new)
123 RC_CHK_ACCESS(maps)->maps_by_address = new;
127 static struct map ***maps__maps_by_name_addr(struct maps *maps)
129 return &RC_CHK_ACCESS(maps)->maps_by_name;
132 static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
134 RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
137 static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
139 RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
142 /* Not in the header, to aid reference counting. */
143 static struct map **maps__maps_by_name(const struct maps *maps)
145 return RC_CHK_ACCESS(maps)->maps_by_name;
149 static void maps__set_maps_by_name(struct maps *maps, struct map **new)
151 RC_CHK_ACCESS(maps)->maps_by_name = new;
155 static bool maps__maps_by_address_sorted(const struct maps *maps)
157 return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
160 static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
162 RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
165 static bool maps__maps_by_name_sorted(const struct maps *maps)
167 return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
170 static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
172 RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
175 struct machine *maps__machine(const struct maps *maps)
177 return RC_CHK_ACCESS(maps)->machine;
180 unsigned int maps__nr_maps(const struct maps *maps)
182 return RC_CHK_ACCESS(maps)->nr_maps;
185 refcount_t *maps__refcnt(struct maps *maps)
187 return &RC_CHK_ACCESS(maps)->refcnt;
190 #ifdef HAVE_LIBUNWIND_SUPPORT
191 void *maps__addr_space(const struct maps *maps)
193 return RC_CHK_ACCESS(maps)->addr_space;
196 void maps__set_addr_space(struct maps *maps, void *addr_space)
198 RC_CHK_ACCESS(maps)->addr_space = addr_space;
201 const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
203 return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
206 void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
208 RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
212 static struct rw_semaphore *maps__lock(struct maps *maps)
215 * When the lock is acquired or released the maps invariants should
218 check_invariants(maps);
219 return &RC_CHK_ACCESS(maps)->lock;
/* Initialize a freshly allocated maps object with a single reference. */
static void maps__init(struct maps *maps, struct machine *machine)
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->maps_by_address = NULL;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
	RC_CHK_ACCESS(maps)->machine = machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	RC_CHK_ACCESS(maps)->addr_space = NULL;
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
	refcount_set(maps__refcnt(maps), 1);
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
	RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
	/* An empty array is trivially sorted by address. */
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
/* Release every contained map and free both lookup arrays. */
static void maps__exit(struct maps *maps)
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);

	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		map__zput(maps_by_address[i]);
		/* NOTE(review): by-name entries hold their own reference — confirm a NULL guard exists for maps_by_name. */
		map__zput(maps_by_name[i]);
	zfree(&maps_by_address);
	zfree(&maps_by_name);
	unwind__finish_access(maps);
/* Allocate and initialize a new, empty maps object holding one reference. */
struct maps *maps__new(struct machine *machine)
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);
/* Destructor — presumably invoked from maps__put() on the final put; verify. */
static void maps__delete(struct maps *maps)
/* Take an additional reference on @maps. */
struct maps *maps__get(struct maps *maps)
	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));
/* Drop a reference; NULL @maps is tolerated. */
void maps__put(struct maps *maps)
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
/*
 * Drop the per-entry references and the by-name array itself; it will be
 * rebuilt the next time a by-name search needs it.
 */
static void __maps__free_maps_by_name(struct maps *maps)
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);
	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
301 static int map__start_cmp(const void *a, const void *b)
303 const struct map *map_a = *(const struct map * const *)a;
304 const struct map *map_b = *(const struct map * const *)b;
305 u64 map_a_start = map__start(map_a);
306 u64 map_b_start = map__start(map_b);
308 if (map_a_start == map_b_start) {
309 u64 map_a_end = map__end(map_a);
310 u64 map_b_end = map__end(map_b);
312 if (map_a_end == map_b_end) {
313 /* Ensure maps with the same addresses have a fixed order. */
314 if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
316 return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
319 return map_a_end > map_b_end ? 1 : -1;
321 return map_a_start > map_b_start ? 1 : -1;
324 static void __maps__sort_by_address(struct maps *maps)
326 if (maps__maps_by_address_sorted(maps))
329 qsort(maps__maps_by_address(maps),
331 sizeof(struct map *),
333 maps__set_maps_by_address_sorted(maps, true);
/* Sort by start address, taking the write lock. */
static void maps__sort_by_address(struct maps *maps)
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
343 static int map__strcmp(const void *a, const void *b)
345 const struct map *map_a = *(const struct map * const *)a;
346 const struct map *map_b = *(const struct map * const *)b;
347 const struct dso *dso_a = map__dso(map_a);
348 const struct dso *dso_b = map__dso(map_b);
349 int ret = strcmp(dso_a->short_name, dso_b->short_name);
351 if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
352 /* Ensure distinct but name equal maps have an order. */
353 return map__start_cmp(a, b);
/*
 * Lazily build and sort maps_by_name. Returns 0 on success — TODO confirm
 * the error value returned on allocation failure.
 */
static int maps__sort_by_name(struct maps *maps)
	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		/* Allocate the by-name array to the current capacity. */
		maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
				      sizeof(*maps_by_name));
			struct map **maps_by_address = maps__maps_by_address(maps);
			unsigned int n = maps__nr_maps(maps);

			maps__set_maps_by_name(maps, maps_by_name);
			/* Each by-name entry holds its own map reference. */
			for (unsigned int i = 0; i < n; i++)
				maps_by_name[i] = map__get(maps_by_address[i]);
			      sizeof(struct map *),
		maps__set_maps_by_name_sorted(maps, true);
	up_write(maps__lock(maps));
/*
 * Index of @map within maps_by_address: binary search when sorted, linear
 * scan otherwise. Logs an error if the map is absent.
 */
static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
	struct map **maps_by_address = maps__maps_by_address(maps);

	if (maps__maps_by_address_sorted(maps)) {
			bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__start_cmp);
			return mapp - maps_by_address;
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
	pr_err("Map missing from maps");
/*
 * Index of @map within maps_by_name: binary search when sorted, linear
 * scan otherwise. Logs an error if the map is absent.
 */
static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
	struct map **maps_by_name = maps__maps_by_name(maps);

	if (maps__maps_by_name_sorted(maps)) {
			bsearch(&map, maps_by_name, maps__nr_maps(maps),
				sizeof(*mapp), map__strcmp);
			return mapp - maps_by_name;
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
	pr_err("Map missing from maps");
/*
 * Append @new to maps_by_address (and maps_by_name when present), growing
 * the arrays as needed, then recompute the sorted flags. Write lock held
 * by the caller.
 */
static int __maps__insert(struct maps *maps, struct map *new)
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	const struct dso *dso = map__dso(new);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		/* Grow capacity geometrically, starting at 32 entries. */
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;
		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
		maps__set_maps_by_address(maps, maps_by_address);
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			maps__set_maps_by_name(maps, maps_by_name);
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
		maps_by_name[nr_maps] = map__get(new);
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
	/*
	 * Recompute if things are sorted. If things are inserted in a sorted
	 * manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	/* An unset/inverted end means end-based invariants can't be trusted. */
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;
	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(new);

			pr_err("Internal error: kernel dso with non kernel map\n");
/* Insert @map, taking the write lock around __maps__insert(). */
int maps__insert(struct maps *maps, struct map *map)
	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	up_write(maps__lock(maps));
/*
 * Remove @map from both arrays, sliding later entries down one slot.
 * Write lock held by the caller. Removal preserves sortedness.
 */
static void __maps__remove(struct maps *maps, struct map *map)
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) * sizeof(*maps_by_name));
	--RC_CHK_ACCESS(maps)->nr_maps;
/* Remove @map under the write lock. */
void maps__remove(struct maps *maps, struct map *map)
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	up_write(maps__lock(maps));
/* True when the container holds no maps; checked under the read lock. */
bool maps__empty(struct maps *maps)
	down_read(maps__lock(maps));
	res = maps__nr_maps(maps) == 0;
	up_read(maps__lock(maps));
/* Two maps handles are equal iff they wrap the same underlying object. */
bool maps__equal(struct maps *a, struct maps *b)
	return RC_CHK_EQUAL(a, b);
/*
 * Invoke @cb on every map under the read lock; see the locking/sorting
 * note at the top of the file for the sort/iterate race.
 */
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
	/* See locking/sorting note. */
	down_read(maps__lock(maps));
	if (maps__maps_by_address_sorted(maps)) {
		/*
		 * maps__for_each_map callbacks may buggily/unsafely
		 * insert into maps_by_address. Deliberately reload
		 * maps__nr_maps and maps_by_address on each iteration
		 * to avoid using memory freed by maps__insert growing
		 * the array - this may cause maps to be skipped or
		 */
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			struct map **maps_by_address = maps__maps_by_address(maps);
			struct map *map = maps_by_address[i];

	up_read(maps__lock(maps));
		maps__sort_by_address(maps);
/* Remove every map for which @cb returns true, under the write lock. */
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
	struct map **maps_by_address;

	down_write(maps__lock(maps));
	maps_by_address = maps__maps_by_address(maps);
	/* Index only advances past entries that are kept. */
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		if (cb(maps_by_address[i], data))
			__maps__remove(maps, maps_by_address[i]);
	up_write(maps__lock(maps));
/* Find the symbol at @addr, loading the containing map's symbols if needed. */
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
	struct map *map = maps__find(maps, addr);
	struct symbol *result = NULL;

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0)
		result = map__find_symbol(map, map__map_ip(map, addr));
/* Argument bundle for maps__find_symbol_by_name_cb(). */
struct maps__find_symbol_by_name_args {
/* maps__for_each_map() callback: search one map for the named symbol. */
static int maps__find_symbol_by_name_cb(struct map *map, void *data)
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	/* Reject symbols that fall outside this map's address range. */
	if (!map__contains_symbol(map, args->sym)) {
	/* Hand a new map reference back to the caller when requested. */
	if (args->mapp != NULL)
		*args->mapp = map__get(map);
/* Search every map for symbol @name; *@mapp gets a new reference when found. */
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
	struct maps__find_symbol_by_name_args args = {

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
/*
 * Resolve @ams->addr to a map-relative address and symbol, re-doing the
 * map lookup if the cached map doesn't cover the address. Returns 0 iff
 * a symbol was found.
 */
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
	return ams->ms.sym ? 0 : -1;
/* Argument bundle for maps__fprintf_cb(). */
struct maps__fprintf_args {
/* maps__for_each_map() callback: print one map and its dso to args->fp. */
static int maps__fprintf_cb(struct map *map, void *data)
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	args->printed += dso__fprintf(map__dso(map), args->fp);
	args->printed += fprintf(args->fp, "--\n");
/* Print every map to @fp; returns the accumulated character count. */
size_t maps__fprintf(struct maps *maps, FILE *fp)
	struct maps__fprintf_args args = {

	maps__for_each_map(maps, maps__fprintf_cb, &args);
/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
	struct map **maps_by_address = maps__maps_by_address(maps);
	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;

	/* Only valid on a sorted by-address array. */
	assert(maps__maps_by_address_sorted(maps));
	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
	/* Binary search over the start-sorted array. */
	while (low <= high) {
		int mid = (low + high) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) > map__start(map)) {
			if (map__start(pos) <= map__start(map)) {
				/* Entry overlaps map. */
/*
 * Adds new to maps, if new overlaps existing entries then the existing maps are
 * adjusted or removed so that new fits without overlapping any entries.
 */
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
	struct map **maps_by_address;
	FILE *fp = debug_file();

	/* Overlap resolution requires the array in start-address order. */
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);
	maps_by_address = maps__maps_by_address(maps);
	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater-than the new map's start.
	 */
	for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos) >= map__end(new))
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				 map__dso(new)->name);
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);

			if (before == NULL) {
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);

			/* Shift the tail clone past new, preserving its address mapping. */
			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));
			assert(map__map_ip(pos, map__end(new)) ==
			       map__map_ip(after, map__end(new)));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary than an insert is needed. If the existing entry
		 * entirely overlaps the existing entry it can just be removed.
		 */
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;
			/* Maps are still ordered, go to next one. */
				__maps__insert(maps, after);
				if (!maps__maps_by_address_sorted(maps)) {
					/*
					 * Sorting broken so invariants don't
					 * hold, sort and go again.
					 */
				/*
				 * Maps are still ordered, skip after and go to
				 * next one (terminate loop).
				 */
				map__put(maps_by_address[i]);
				maps_by_address[i] = after;
				/* Maps are ordered, go to next one. */
			__maps__remove(maps, pos);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		check_invariants(maps);
	/* No (more) overlaps: plain append of new. */
	__maps__insert(maps, new);
/* Locked wrapper around __maps__fixup_overlap_and_insert(). */
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
	down_write(maps__lock(maps));
	err = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
/* Clone every map from @parent into @dest (e.g. on process fork). */
int maps__copy_from(struct maps *dest, struct maps *parent)
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;

	/* Lock order: dest write lock before parent read lock. */
	down_write(maps__lock(dest));
	down_read(maps__lock(parent));
	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			if (maps__maps_by_name(parent)) {
					malloc(nr_maps_allocated * sizeof(struct map *));
			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

				err = unwind__prepare_access(dest, new, NULL);
				dest_maps_by_address[i] = new;
				if (dest_maps_by_name)
					dest_maps_by_name[i] = map__get(new);
				/* Keep nr_maps consistent even if a later clone fails. */
				RC_CHK_ACCESS(dest)->nr_maps = i + 1;
		/* Sortedness (and the name-search cache) carries over from parent. */
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						      maps__maps_by_name_sorted(parent));
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		/* Unexpected copying to a maps containing entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

				err = unwind__prepare_access(dest, new, NULL);
				err = __maps__insert(dest, new);
	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
950 static int map__addr_cmp(const void *key, const void *entry)
952 const u64 ip = *(const u64 *)key;
953 const struct map *map = *(const struct map * const *)entry;
955 if (ip < map__start(map))
957 if (ip >= map__end(map))
/* Find the map containing address @ip; returns a new reference or NULL. */
struct map *maps__find(struct maps *maps, u64 ip)
	struct map *result = NULL;

	/* See locking/sorting note. */
	down_read(maps__lock(maps));
	if (maps__maps_by_address_sorted(maps)) {
			bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__addr_cmp);
			result = map__get(*mapp);
	up_read(maps__lock(maps));
		/* Not sorted: sort and retry the search. */
		maps__sort_by_address(maps);
986 static int map__strcmp_name(const void *name, const void *b)
988 const struct dso *dso = map__dso(*(const struct map **)b);
990 return strcmp(name, dso->short_name);
/*
 * Find a map whose dso short name equals @name; returns a new reference
 * or NULL. Checks the last-hit cache, then the sorted by-name array, then
 * falls back to a linear scan if the by-name array can't be allocated.
 */
struct map *maps__find_by_name(struct maps *maps, const char *name)
	struct map *result = NULL;

	/* See locking/sorting note. */
	down_read(maps__lock(maps));
	/* First check last found entry. */
	i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
	if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
		struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

		if (dso && strcmp(dso->short_name, name) == 0) {
			result = map__get(maps__maps_by_name(maps)[i]);
	/* Second search sorted array. */
	if (!done && maps__maps_by_name_sorted(maps)) {
			bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__strcmp_name);
			result = map__get(*mapp);
			i = mapp - maps__maps_by_name(maps);
			/* Remember the hit to speed up repeated searches. */
			RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
	up_read(maps__lock(maps));
	/* Sort and retry binary search. */
	if (maps__sort_by_name(maps)) {
		/*
		 * Memory allocation failed do linear search
		 * through address sorted maps.
		 */
		struct map **maps_by_address;

		down_read(maps__lock(maps));
		maps_by_address = maps__maps_by_address(maps);
		n = maps__nr_maps(maps);
		for (i = 0; i < n; i++) {
			struct map *pos = maps_by_address[i];
			struct dso *dso = map__dso(pos);

			if (dso && strcmp(dso->short_name, name) == 0) {
				result = map__get(pos);
		up_read(maps__lock(maps));
1059 struct map *maps__find_next_entry(struct maps *maps, struct map *map)
1062 struct map *result = NULL;
1064 down_read(maps__lock(maps));
1065 i = maps__by_address_index(maps, map);
1066 if (i < maps__nr_maps(maps))
1067 result = map__get(maps__maps_by_address(maps)[i]);
1069 up_read(maps__lock(maps));
/*
 * Set missing/overlapping map ends from the start of the following map;
 * the last map's end is guessed as ~0ULL. Clears ends_broken.
 */
void maps__fixup_end(struct maps *maps)
	struct map **maps_by_address;

	down_write(maps__lock(maps));
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);
	maps_by_address = maps__maps_by_address(maps);
	n = maps__nr_maps(maps);
	for (unsigned int i = 1; i < n; i++) {
		struct map *prev = maps_by_address[i - 1];
		struct map *curr = maps_by_address[i];

		if (!map__end(prev) || map__end(prev) > map__start(curr))
			map__set_end(prev, map__start(curr));
	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	if (n > 0 && !map__end(maps_by_address[n - 1]))
		map__set_end(maps_by_address[n - 1], ~0ULL);
	/* All ends now consistent with the following map's start. */
	RC_CHK_ACCESS(maps)->ends_broken = false;
	up_write(maps__lock(maps));
/*
 * Merges map into maps by splitting the new map within the existing map
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
	unsigned int first_after_, kmaps__nr_maps;
	struct map **kmaps_maps_by_address;
	struct map **merged_maps_by_address;
	unsigned int merged_nr_maps_allocated;

	/* First try under a read lock. */
	down_read(maps__lock(kmaps));
	if (maps__maps_by_address_sorted(kmaps))
	up_read(maps__lock(kmaps));
	/* First after binary search requires sorted maps. Sort and try again. */
	maps__sort_by_address(kmaps);
	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);
	if (first_after_ >= maps__nr_maps(kmaps) ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		up_read(maps__lock(kmaps));
		return maps__insert(kmaps, new_map);
	up_read(maps__lock(kmaps));
	/* Plain insert with a read-lock failed, try again now with the write lock. */
	down_write(maps__lock(kmaps));
	if (!maps__maps_by_address_sorted(kmaps))
		__maps__sort_by_address(kmaps);
	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);
	kmaps__nr_maps = maps__nr_maps(kmaps);
	if (first_after_ >= kmaps__nr_maps ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		int ret = __maps__insert(kmaps, new_map);
		up_write(maps__lock(kmaps));
	/* Array to merge into, possibly 1 more for the sake of new_map. */
	merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
	if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
		merged_nr_maps_allocated++;
	merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
	if (!merged_maps_by_address) {
		up_write(maps__lock(kmaps));
	/* Swap in the fresh array; the old one is drained below. */
	maps__set_maps_by_address(kmaps, merged_maps_by_address);
	maps__set_maps_by_address_sorted(kmaps, true);
	/* Drop the by-name array; it is rebuilt the next time it's needed. */
	zfree(maps__maps_by_name_addr(kmaps));
	maps__set_maps_by_name_sorted(kmaps, true);
	maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);
	/* Copy entries before the new_map that can't overlap. */
	for (unsigned int i = 0; i < first_after_; i++)
		merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);
	maps__set_nr_maps(kmaps, first_after_);
	/* Add the new map, it will be split when the later overlapping mappings are added. */
	__maps__insert(kmaps, new_map);
	/* Insert mappings after new_map, splitting new_map in the process. */
	for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
		__maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);
	/* Copy the maps from merged into kmaps. */
	for (unsigned int i = 0; i < kmaps__nr_maps; i++)
		map__zput(kmaps_maps_by_address[i]);
	free(kmaps_maps_by_address);
	up_write(maps__lock(kmaps));
/* Load symbols for the first entry (by array order), if any, under the read lock. */
void maps__load_first(struct maps *maps)
	down_read(maps__lock(maps));
	if (maps__nr_maps(maps) > 0)
		map__load(maps__maps_by_address(maps)[0]);
	up_read(maps__lock(maps));