1 // SPDX-License-Identifier: GPL-2.0
4 #include <linux/zalloc.h>
13 static void maps__init(struct maps *maps, struct machine *machine)
15 refcount_set(maps__refcnt(maps), 1);
16 init_rwsem(maps__lock(maps));
17 RC_CHK_ACCESS(maps)->entries = RB_ROOT;
18 RC_CHK_ACCESS(maps)->machine = machine;
19 RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
20 RC_CHK_ACCESS(maps)->nr_maps = 0;
21 RC_CHK_ACCESS(maps)->maps_by_name = NULL;
24 static void __maps__free_maps_by_name(struct maps *maps)
27 * Free everything to try to do it from the rbtree in the next search
29 for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
30 map__put(maps__maps_by_name(maps)[i]);
32 zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
33 RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
36 static int __maps__insert(struct maps *maps, struct map *map)
38 struct rb_node **p = &maps__entries(maps)->rb_node;
39 struct rb_node *parent = NULL;
40 const u64 ip = map__start(map);
41 struct map_rb_node *m, *new_rb_node;
43 new_rb_node = malloc(sizeof(*new_rb_node));
47 RB_CLEAR_NODE(&new_rb_node->rb_node);
48 new_rb_node->map = map__get(map);
52 m = rb_entry(parent, struct map_rb_node, rb_node);
53 if (ip < map__start(m->map))
59 rb_link_node(&new_rb_node->rb_node, parent, p);
60 rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
64 int maps__insert(struct maps *maps, struct map *map)
67 const struct dso *dso = map__dso(map);
69 down_write(maps__lock(maps));
70 err = __maps__insert(maps, map);
74 ++RC_CHK_ACCESS(maps)->nr_maps;
76 if (dso && dso->kernel) {
77 struct kmap *kmap = map__kmap(map);
82 pr_err("Internal error: kernel dso with non kernel map\n");
87 * If we already performed some search by name, then we need to add the just
88 * inserted map and resort.
90 if (maps__maps_by_name(maps)) {
91 if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
92 int nr_allocate = maps__nr_maps(maps) * 2;
93 struct map **maps_by_name = realloc(maps__maps_by_name(maps),
94 nr_allocate * sizeof(map));
96 if (maps_by_name == NULL) {
97 __maps__free_maps_by_name(maps);
102 RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
103 RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
105 maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
106 __maps__sort_by_name(maps);
109 up_write(maps__lock(maps));
113 static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
115 rb_erase_init(&rb_node->rb_node, maps__entries(maps));
116 map__put(rb_node->map);
120 void maps__remove(struct maps *maps, struct map *map)
122 struct map_rb_node *rb_node;
124 down_write(maps__lock(maps));
125 if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
126 RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
128 rb_node = maps__find_node(maps, map);
129 assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
130 __maps__remove(maps, rb_node);
131 if (maps__maps_by_name(maps))
132 __maps__free_maps_by_name(maps);
133 --RC_CHK_ACCESS(maps)->nr_maps;
134 up_write(maps__lock(maps));
137 static void __maps__purge(struct maps *maps)
139 struct map_rb_node *pos, *next;
141 if (maps__maps_by_name(maps))
142 __maps__free_maps_by_name(maps);
144 maps__for_each_entry_safe(maps, pos, next) {
145 rb_erase_init(&pos->rb_node, maps__entries(maps));
/* Purge all entries under the write lock; the struct itself is not freed here. */
static void maps__exit(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__purge(maps);
	up_write(maps__lock(maps));
}
/* True when the maps rbtree holds no entries. */
bool maps__empty(struct maps *maps)
{
	return !maps__first(maps);
}
163 struct maps *maps__new(struct machine *machine)
166 RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));
168 if (ADD_RC_CHK(result, maps))
169 maps__init(result, machine);
/* Final teardown once the refcount hits zero: purge entries, then free. */
static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	RC_CHK_FREE(maps);
}
/* Take a new reference on @maps (NULL-safe via RC_CHK_GET). */
struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}
/* Drop a reference; delete on the last put. NULL-safe. */
void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		/* Not the last ref: just release the RC_CHK wrapper. */
		RC_CHK_PUT(maps);
}
199 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
201 struct map *map = maps__find(maps, addr);
203 /* Ensure map is loaded before using map->map_ip */
204 if (map != NULL && map__load(map) >= 0) {
207 return map__find_symbol(map, map__map_ip(map, addr));
213 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
216 struct map_rb_node *pos;
218 down_read(maps__lock(maps));
220 maps__for_each_entry(maps, pos) {
221 sym = map__find_symbol_by_name(pos->map, name);
225 if (!map__contains_symbol(pos->map, sym)) {
236 up_read(maps__lock(maps));
240 int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
242 if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
245 ams->ms.map = maps__find(maps, ams->addr);
246 if (ams->ms.map == NULL)
250 ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
251 ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
253 return ams->ms.sym ? 0 : -1;
256 size_t maps__fprintf(struct maps *maps, FILE *fp)
259 struct map_rb_node *pos;
261 down_read(maps__lock(maps));
263 maps__for_each_entry(maps, pos) {
264 printed += fprintf(fp, "Map:");
265 printed += map__fprintf(pos->map, fp);
267 printed += dso__fprintf(map__dso(pos->map), fp);
268 printed += fprintf(fp, "--\n");
272 up_read(maps__lock(maps));
277 int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
279 struct rb_root *root;
280 struct rb_node *next, *first;
283 down_write(maps__lock(maps));
285 root = maps__entries(maps);
288 * Find first map where end > map->start.
289 * Same as find_vma() in kernel.
291 next = root->rb_node;
294 struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
296 if (map__end(pos->map) > map__start(map)) {
298 if (map__start(pos->map) <= map__start(map))
300 next = next->rb_left;
302 next = next->rb_right;
306 while (next && !err) {
307 struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
308 next = rb_next(&pos->rb_node);
311 * Stop if current map starts after map->end.
312 * Maps are ordered by start: next will not overlap for sure.
314 if (map__start(pos->map) >= map__end(map))
320 pr_debug("overlapping maps in %s (disable tui for more info)\n",
321 map__dso(map)->name);
323 fputs("overlapping maps:\n", fp);
324 map__fprintf(map, fp);
325 map__fprintf(pos->map, fp);
329 rb_erase_init(&pos->rb_node, root);
331 * Now check if we need to create new maps for areas not
332 * overlapped by the new map:
334 if (map__start(map) > map__start(pos->map)) {
335 struct map *before = map__clone(pos->map);
337 if (before == NULL) {
342 map__set_end(before, map__start(map));
343 err = __maps__insert(maps, before);
349 if (verbose >= 2 && !use_browser)
350 map__fprintf(before, fp);
354 if (map__end(map) < map__end(pos->map)) {
355 struct map *after = map__clone(pos->map);
362 map__set_start(after, map__end(map));
363 map__add_pgoff(after, map__end(map) - map__start(pos->map));
364 assert(map__map_ip(pos->map, map__end(map)) ==
365 map__map_ip(after, map__end(map)));
366 err = __maps__insert(maps, after);
371 if (verbose >= 2 && !use_browser)
372 map__fprintf(after, fp);
379 up_write(maps__lock(maps));
 * XXX This should not really _copy_ the maps, but refcount them.
386 int maps__clone(struct thread *thread, struct maps *parent)
388 struct maps *maps = thread__maps(thread);
390 struct map_rb_node *rb_node;
392 down_read(maps__lock(parent));
394 maps__for_each_entry(parent, rb_node) {
395 struct map *new = map__clone(rb_node->map);
402 err = unwind__prepare_access(maps, new, NULL);
406 err = maps__insert(maps, new);
415 up_read(maps__lock(parent));
419 struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
421 struct map_rb_node *rb_node;
423 maps__for_each_entry(maps, rb_node) {
424 if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
430 struct map *maps__find(struct maps *maps, u64 ip)
433 struct map_rb_node *m;
436 down_read(maps__lock(maps));
438 p = maps__entries(maps)->rb_node;
440 m = rb_entry(p, struct map_rb_node, rb_node);
441 if (ip < map__start(m->map))
443 else if (ip >= map__end(m->map))
451 up_read(maps__lock(maps));
452 return m ? m->map : NULL;
455 struct map_rb_node *maps__first(struct maps *maps)
457 struct rb_node *first = rb_first(maps__entries(maps));
460 return rb_entry(first, struct map_rb_node, rb_node);
464 struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
466 struct rb_node *next;
471 next = rb_next(&node->rb_node);
476 return rb_entry(next, struct map_rb_node, rb_node);