// xref: /linux/tools/perf/util/maps.c (revision 84b9b44b)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"

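/*
 * Editorial note (assumption, not in the original file): the
 * RC_CHK_ACCESS()/RC_STRUCT()/ADD_RC_CHK() macros used throughout come from
 * perf's reference count checking support. When that checking is compiled
 * in, they route accesses through a checked wrapper object; otherwise they
 * reduce to plain struct accesses.
 */
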
static void maps__init(struct maps *maps, struct machine *machine)
{
	refcount_set(maps__refcnt(maps), 1);
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->entries = RB_ROOT;
	RC_CHK_ACCESS(maps)->machine = machine;
	RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
}

static void __maps__free_maps_by_name(struct maps *maps)
{
	/*
	 * Drop the whole by-name array so that it gets rebuilt from the
	 * rbtree on the next search by name.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
}

static int __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps__entries(maps)->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map__start(map);
	struct map_rb_node *m, *new_rb_node;

	new_rb_node = malloc(sizeof(*new_rb_node));
	if (!new_rb_node)
		return -ENOMEM;

	RB_CLEAR_NODE(&new_rb_node->rb_node);
	new_rb_node->map = map__get(map);

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&new_rb_node->rb_node, parent, p);
	rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
	return 0;
}
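
/*
 * Editorial note: __maps__insert() keys the rbtree purely on map__start()
 * and does not check for overlap with existing entries; overlaps are
 * resolved separately by maps__fixup_overlappings() below.
 */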

int maps__insert(struct maps *maps, struct map *map)
{
	int err;
	const struct dso *dso = map__dso(map);

	down_write(maps__lock(maps));
	err = __maps__insert(maps, map);
	if (err)
		goto out;

	++RC_CHK_ACCESS(maps)->nr_maps;

	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}

	/*
	 * If a search by name has already been performed, then the by-name
	 * array exists: the just inserted map must be added to it and the
	 * array re-sorted.
	 */
	if (maps__maps_by_name(maps)) {
		if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
			int nr_allocate = maps__nr_maps(maps) * 2;
			struct map **maps_by_name = realloc(maps__maps_by_name(maps),
							    nr_allocate * sizeof(map));

			if (maps_by_name == NULL) {
				__maps__free_maps_by_name(maps);
				err = -ENOMEM;
				goto out;
			}

			RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
			RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
		}
		maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
		__maps__sort_by_name(maps);
	}
 out:
	up_write(maps__lock(maps));
	return err;
}
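
/*
 * Usage sketch (illustrative only, not part of the original file; the map
 * would come from one of the map__new*() constructors in util/map.h):
 *
 *	struct map *map = ...;
 *
 *	if (maps__insert(maps, map))
 *		pr_err("failed to insert map (out of memory)\n");
 *	map__put(map);	// maps__insert() took its own reference
 */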

static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{
	rb_erase_init(&rb_node->rb_node, maps__entries(maps));
	map__put(rb_node->map);
	free(rb_node);
}

void maps__remove(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	down_write(maps__lock(maps));
	if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
		RC_CHK_ACCESS(maps)->last_search_by_name = NULL;

	rb_node = maps__find_node(maps, map);
	assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
	__maps__remove(maps, rb_node);
	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);
	--RC_CHK_ACCESS(maps)->nr_maps;
	up_write(maps__lock(maps));
}

static void __maps__purge(struct maps *maps)
{
	struct map_rb_node *pos, *next;

	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);

	maps__for_each_entry_safe(maps, pos, next) {
		rb_erase_init(&pos->rb_node, maps__entries(maps));
		map__put(pos->map);
		free(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__purge(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	return !maps__first(maps);
}

struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}
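
/*
 * Lifecycle sketch (illustrative, not in the original file): maps__new()
 * returns NULL when allocation fails, and otherwise hands back a maps with
 * a refcount of one, to be released with maps__put():
 *
 *	struct maps *maps = maps__new(machine);
 *
 *	if (maps == NULL)
 *		return -ENOMEM;
 *	// ... use maps ...
 *	maps__put(maps);
 */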

void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}
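
/*
 * Reference counting sketch (illustrative, not in the original file): each
 * maps__get() is paired with a maps__put(); the final put lands in
 * maps__delete():
 *
 *	struct maps *ref = maps__get(thread->maps);	// take a reference
 *	// ... use ref ...
 *	maps__put(ref);					// drop it
 */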

struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map__map_ip(map, addr));
	}

	return NULL;
}
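
/*
 * Usage sketch (illustrative; "addr" is a hypothetical sample address):
 *
 *	struct map *map = NULL;
 *	struct symbol *sym = maps__find_symbol(maps, addr, &map);
 *
 *	if (sym != NULL)
 *		pr_debug("%s found in %s\n", sym->name, map__dso(map)->name);
 */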

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct symbol *sym;
	struct map_rb_node *pos;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, pos) {
		sym = map__find_symbol_by_name(pos->map, name);

		if (sym == NULL)
			continue;
		if (!map__contains_symbol(pos->map, sym)) {
			sym = NULL;
			continue;
		}
		if (mapp != NULL)
			*mapp = pos->map;
		goto out;
	}

	sym = NULL;
out:
	up_read(maps__lock(maps));
	return sym;
}
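
/*
 * Editorial note: unlike maps__find(), which is an O(log n) walk of the
 * rbtree, this scans every map in address order and performs a symbol
 * lookup in each, so the cost is linear in the number of maps.
 */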

int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct map_rb_node *pos;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, pos) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos->map, fp);
		if (verbose > 2) {
			printed += dso__fprintf(map__dso(pos->map), fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(maps__lock(maps));

	return printed;
}

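/*
 * Editorial summary of maps__fixup_overlappings(): every existing map that
 * overlaps [map__start(map), map__end(map)) is removed from the tree; when
 * an existing map sticks out on either side, the non-overlapped head and/or
 * tail is cloned and re-inserted:
 *
 *	before:	|------------- pos -------------|
 *	new:	        |------ map ------|
 *	after:	|before|------ map ------|-after-|
 *
 * Returns 0 on success, or -ENOMEM if cloning or inserting a split piece
 * fails.
 */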
int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(maps__lock(maps));

	root = maps__entries(maps);

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);

		if (map__end(pos->map) > map__start(map)) {
			first = next;
			if (map__start(pos->map) <= map__start(map))
				break;
			next = next->rb_left;
		} else {
			next = next->rb_right;
		}
	}

	next = first;
	while (next && !err) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos->map) >= map__end(map))
			break;

		if (verbose >= 2) {
			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map__dso(map)->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos->map, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(map) > map__start(pos->map)) {
			struct map *before = map__clone(pos->map);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			map__set_end(before, map__start(map));
			err = __maps__insert(maps, before);
			if (err) {
				map__put(before);
				goto put_map;
			}

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map__end(map) < map__end(pos->map)) {
			struct map *after = map__clone(pos->map);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			map__set_start(after, map__end(map));
			map__add_pgoff(after, map__end(map) - map__start(pos->map));
			assert(map__map_ip(pos->map, map__end(map)) ==
			       map__map_ip(after, map__end(map)));
			err = __maps__insert(maps, after);
			if (err) {
				map__put(after);
				goto put_map;
			}
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos->map);
	}
	up_write(maps__lock(maps));
	return err;
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int maps__clone(struct thread *thread, struct maps *parent)
{
	struct maps *maps = thread->maps;
	int err;
	struct map_rb_node *rb_node;

	down_read(maps__lock(parent));

	maps__for_each_entry(parent, rb_node) {
		struct map *new = map__clone(rb_node->map);

		if (new == NULL) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = unwind__prepare_access(maps, new, NULL);
		if (err)
			goto out_unlock;

		err = maps__insert(maps, new);
		if (err)
			goto out_unlock;

		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(maps__lock(parent));
	return err;
}
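
/*
 * Editorial note: per the XXX above, maps__clone() deep-copies each parent
 * map instead of sharing it. maps__insert() takes its own reference, which
 * is why the local clone is released with map__put() once inserted.
 */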

struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	maps__for_each_entry(maps, rb_node) {
		if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
			return rb_node;
	}
	return NULL;
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map_rb_node *m;

	down_read(maps__lock(maps));

	p = maps__entries(maps)->rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = p->rb_left;
		else if (ip >= map__end(m->map))
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(maps__lock(maps));
	return m ? m->map : NULL;
}
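
/*
 * Usage sketch (illustrative): find the map covering an address. The
 * returned pointer is not reference counted by this call, so take a
 * reference with map__get() if it is kept past the lookup:
 *
 *	struct map *map = maps__find(maps, ip);
 *
 *	if (map != NULL)
 *		pr_debug("%#" PRIx64 " is in %s\n", ip, map__dso(map)->name);
 */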

struct map_rb_node *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(maps__entries(maps));

	if (first)
		return rb_entry(first, struct map_rb_node, rb_node);
	return NULL;
}

struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
{
	struct rb_node *next;

	if (!node)
		return NULL;

	next = rb_next(&node->rb_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct map_rb_node, rb_node);
}
477