tools/perf/util/map.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include "symbol.h"
3 #include <errno.h>
4 #include <inttypes.h>
5 #include <limits.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <stdio.h>
9 #include <unistd.h>
10 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
11 #include "map.h"
12 #include "thread.h"
13 #include "vdso.h"
14 #include "build-id.h"
15 #include "util.h"
16 #include "debug.h"
17 #include "machine.h"
18 #include <linux/string.h>
19 #include "srcline.h"
20 #include "namespaces.h"
21 #include "unwind.h"
22
23 static void __maps__insert(struct maps *maps, struct map *map);
24
25 const char *map_type__name[MAP__NR_TYPES] = {
26 [MAP__FUNCTION] = "Functions",
27 [MAP__VARIABLE] = "Variables",
28 };
29
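/*
 * Anonymous memory: MAP_HUGETLB mappings and the pseudo file names
 * ("//anon", "/dev/zero", "/anon_hugepage") used for anonymous mappings.
 */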
30 static inline int is_anon_memory(const char *filename, u32 flags)
31 {
32 return flags & MAP_HUGETLB ||
33 !strcmp(filename, "//anon") ||
34 !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
35 !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
36 }
37
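/* Stack, SysV shared memory and heap mappings have no DSO to load symbols from. */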
38 static inline int is_no_dso_memory(const char *filename)
39 {
40 return !strncmp(filename, "[stack", 6) ||
41 !strncmp(filename, "/SYSV", 5) ||
42 !strcmp(filename, "[heap]");
43 }
44
45 static inline int is_android_lib(const char *filename)
46 {
47 return !strncmp(filename, "/data/app-lib", 13) ||
48 !strncmp(filename, "/system/lib", 11);
49 }
50
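/*
 * Rewrite an on-device Android library path into a host-side path, using the
 * APP_ABI, APK_PATH, NDK_ROOT and APP_PLATFORM environment variables.
 * Returns true and fills in newfilename on success, false otherwise.
 */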
51 static inline bool replace_android_lib(const char *filename, char *newfilename)
52 {
53 const char *libname;
54 char *app_abi;
55 size_t app_abi_length, new_length;
56 size_t lib_length = 0;
57
58 libname = strrchr(filename, '/');
59 if (libname)
60 lib_length = strlen(libname);
61
62 app_abi = getenv("APP_ABI");
63 if (!app_abi)
64 return false;
65
66 app_abi_length = strlen(app_abi);
67
68 if (!strncmp(filename, "/data/app-lib", 13)) {
69 char *apk_path;
70
71 if (!app_abi_length)
72 return false;
73
74 new_length = 7 + app_abi_length + lib_length;
75
76 apk_path = getenv("APK_PATH");
77 if (apk_path) {
78 new_length += strlen(apk_path) + 1;
79 if (new_length > PATH_MAX)
80 return false;
81 snprintf(newfilename, new_length,
82 "%s/libs/%s/%s", apk_path, app_abi, libname);
83 } else {
84 if (new_length > PATH_MAX)
85 return false;
86 snprintf(newfilename, new_length,
87 "libs/%s/%s", app_abi, libname);
88 }
89 return true;
90 }
91
92 if (!strncmp(filename, "/system/lib/", 11)) {
93 char *ndk, *app;
94 const char *arch;
95 size_t ndk_length;
96 size_t app_length;
97
98 ndk = getenv("NDK_ROOT");
99 app = getenv("APP_PLATFORM");
100
101 if (!(ndk && app))
102 return false;
103
104 ndk_length = strlen(ndk);
105 app_length = strlen(app);
106
107 if (!(ndk_length && app_length && app_abi_length))
108 return false;
109
110 arch = !strncmp(app_abi, "arm", 3) ? "arm" :
111 !strncmp(app_abi, "mips", 4) ? "mips" :
112 !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
113
114 if (!arch)
115 return false;
116
117 new_length = 27 + ndk_length +
118 app_length + lib_length +
119 strlen(arch);
120
121 if (new_length > PATH_MAX)
122 return false;
123 snprintf(newfilename, new_length,
124 "%s/platforms/%s/arch-%s/usr/lib/%s",
125 ndk, app, arch, libname);
126
127 return true;
128 }
129 return false;
130 }
131
132 void map__init(struct map *map, enum map_type type,
133 u64 start, u64 end, u64 pgoff, struct dso *dso)
134 {
135 map->type = type;
136 map->start = start;
137 map->end = end;
138 map->pgoff = pgoff;
139 map->reloc = 0;
140 map->dso = dso__get(dso);
141 map->map_ip = map__map_ip;
142 map->unmap_ip = map__unmap_ip;
143 RB_CLEAR_NODE(&map->rb_node);
144 map->groups = NULL;
145 map->erange_warned = false;
146 refcount_set(&map->refcnt, 1);
147 }
148
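/*
 * Allocate and initialize a map for a user-level mapping.  Anonymous and
 * DSO-less maps are redirected to /tmp/perf-<pid>.map (e.g. JIT symbol
 * maps), Android library paths are rewritten to host-side copies, and vdso
 * maps are always resolved on the host, never through the thread's
 * namespace.
 */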
149 struct map *map__new(struct machine *machine, u64 start, u64 len,
150 u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
151 u64 ino_gen, u32 prot, u32 flags, char *filename,
152 enum map_type type, struct thread *thread)
153 {
154 struct map *map = malloc(sizeof(*map));
155 struct nsinfo *nsi = NULL;
156 struct nsinfo *nnsi;
157
158 if (map != NULL) {
159 char newfilename[PATH_MAX];
160 struct dso *dso;
161 int anon, no_dso, vdso, android;
162
163 android = is_android_lib(filename);
164 anon = is_anon_memory(filename, flags);
165 vdso = is_vdso_map(filename);
166 no_dso = is_no_dso_memory(filename);
167
168 map->maj = d_maj;
169 map->min = d_min;
170 map->ino = ino;
171 map->ino_generation = ino_gen;
172 map->prot = prot;
173 map->flags = flags;
174 nsi = nsinfo__get(thread->nsinfo);
175
176 if ((anon || no_dso) && nsi && type == MAP__FUNCTION) {
177 snprintf(newfilename, sizeof(newfilename),
178 "/tmp/perf-%d.map", nsi->pid);
179 filename = newfilename;
180 }
181
182 if (android) {
183 if (replace_android_lib(filename, newfilename))
184 filename = newfilename;
185 }
186
187 if (vdso) {
188 /* The vdso maps are always on the host and not the
189 * container. Ensure that we don't use setns to look
190 * them up.
191 */
192 nnsi = nsinfo__copy(nsi);
193 if (nnsi) {
194 nsinfo__put(nsi);
195 nnsi->need_setns = false;
196 nsi = nnsi;
197 }
198 pgoff = 0;
199 dso = machine__findnew_vdso(machine, thread);
200 } else
201 dso = machine__findnew_dso(machine, filename);
202
203 if (dso == NULL)
204 goto out_delete;
205
206 map__init(map, type, start, start + len, pgoff, dso);
207
208 if (anon || no_dso) {
209 map->map_ip = map->unmap_ip = identity__map_ip;
210
211 /*
212 * Set memory without DSO as loaded. All map__find_*
213 * functions still return NULL, and we avoid the
214 * unnecessary map__load warning.
215 */
216 if (type != MAP__FUNCTION)
217 dso__set_loaded(dso, map->type);
218 }
219 dso->nsinfo = nsi;
220 dso__put(dso);
221 }
222 return map;
223 out_delete:
224 nsinfo__put(nsi);
225 free(map);
226 return NULL;
227 }
228
229 /*
230 * Constructor variant for modules (where we know from /proc/modules where
231 * they are loaded) and for vmlinux, where we only know where it starts
232 * and ends after loading all of its symbols.
233 */
234 struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
235 {
236 struct map *map = calloc(1, (sizeof(*map) +
237 (dso->kernel ? sizeof(struct kmap) : 0)));
238 if (map != NULL) {
239 /*
240 * ->end will be filled after we load all the symbols
241 */
242 map__init(map, type, start, 0, 0, dso);
243 }
244
245 return map;
246 }
247
248 /*
249 * Use this and __map__is_kmodule() for map instances that are in
250 * machine->kmaps, and thus have map->groups->machine all properly set, to
251 * disambiguate between the kernel and modules.
252 *
253 * When the need arises, introduce map__is_{kernel,kmodule}() that
254 * checks (map->groups != NULL && map->groups->machine != NULL &&
255 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
256 */
257 bool __map__is_kernel(const struct map *map)
258 {
259 return __machine__kernel_map(map->groups->machine, map->type) == map;
260 }
261
262 static void map__exit(struct map *map)
263 {
264 BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
265 dso__zput(map->dso);
266 }
267
268 void map__delete(struct map *map)
269 {
270 map__exit(map);
271 free(map);
272 }
273
274 void map__put(struct map *map)
275 {
276 if (map && refcount_dec_and_test(&map->refcnt))
277 map__delete(map);
278 }
279
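/* Clamp map->start/map->end to the first/last symbol loaded for the DSO. */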
280 void map__fixup_start(struct map *map)
281 {
282 struct rb_root *symbols = &map->dso->symbols[map->type];
283 struct rb_node *nd = rb_first(symbols);
284 if (nd != NULL) {
285 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
286 map->start = sym->start;
287 }
288 }
289
290 void map__fixup_end(struct map *map)
291 {
292 struct rb_root *symbols = &map->dso->symbols[map->type];
293 struct rb_node *nd = rb_last(symbols);
294 if (nd != NULL) {
295 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
296 map->end = sym->end;
297 }
298 }
299
300 #define DSO__DELETED "(deleted)"
301
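/*
 * Lazily load the symbol table of the map's DSO.  Returns 0 if it is already
 * loaded or loading succeeded, -1 (after warning) if no symbols were found.
 */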
302 int map__load(struct map *map)
303 {
304 const char *name = map->dso->long_name;
305 int nr;
306
307 if (dso__loaded(map->dso, map->type))
308 return 0;
309
310 nr = dso__load(map->dso, map);
311 if (nr < 0) {
312 if (map->dso->has_build_id) {
313 char sbuild_id[SBUILD_ID_SIZE];
314
315 build_id__sprintf(map->dso->build_id,
316 sizeof(map->dso->build_id),
317 sbuild_id);
318 pr_warning("%s with build id %s not found",
319 name, sbuild_id);
320 } else
321 pr_warning("Failed to open %s", name);
322
323 pr_warning(", continuing without symbols\n");
324 return -1;
325 } else if (nr == 0) {
326 #ifdef HAVE_LIBELF_SUPPORT
327 const size_t len = strlen(name);
328 const size_t real_len = len - sizeof(DSO__DELETED);
329
330 if (len > sizeof(DSO__DELETED) &&
331 strcmp(name + real_len + 1, DSO__DELETED) == 0) {
332 pr_warning("%.*s was updated (is prelink enabled?). "
333 "Restart the long running apps that use it!\n",
334 (int)real_len, name);
335 } else {
336 pr_warning("no symbols found in %s, maybe install "
337 "a debug package?\n", name);
338 }
339 #endif
340 return -1;
341 }
342
343 return 0;
344 }
345
346 struct symbol *map__find_symbol(struct map *map, u64 addr)
347 {
348 if (map__load(map) < 0)
349 return NULL;
350
351 return dso__find_symbol(map->dso, map->type, addr);
352 }
353
354 struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
355 {
356 if (map__load(map) < 0)
357 return NULL;
358
359 if (!dso__sorted_by_name(map->dso, map->type))
360 dso__sort_by_name(map->dso, map->type);
361
362 return dso__find_symbol_by_name(map->dso, map->type, name);
363 }
364
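/*
 * Duplicate a map: the copy gets its own refcount and rb_node, takes a new
 * reference on the DSO, and is not linked into any map_groups yet.
 */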
365 struct map *map__clone(struct map *from)
366 {
367 struct map *map = memdup(from, sizeof(*map));
368
369 if (map != NULL) {
370 refcount_set(&map->refcnt, 1);
371 RB_CLEAR_NODE(&map->rb_node);
372 dso__get(map->dso);
373 map->groups = NULL;
374 }
375
376 return map;
377 }
378
379 int map__overlap(struct map *l, struct map *r)
380 {
381 if (l->start > r->start) {
382 struct map *t = l;
383 l = r;
384 r = t;
385 }
386
387 if (l->end > r->start)
388 return 1;
389
390 return 0;
391 }
392
393 size_t map__fprintf(struct map *map, FILE *fp)
394 {
395 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
396 map->start, map->end, map->pgoff, map->dso->name);
397 }
398
399 size_t map__fprintf_dsoname(struct map *map, FILE *fp)
400 {
401 const char *dsoname = "[unknown]";
402
403 if (map && map->dso) {
404 if (symbol_conf.show_kernel_path && map->dso->long_name)
405 dsoname = map->dso->long_name;
406 else
407 dsoname = map->dso->name;
408 }
409
410 return fprintf(fp, "%s", dsoname);
411 }
412
413 int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
414 FILE *fp)
415 {
416 char *srcline;
417 int ret = 0;
418
419 if (map && map->dso) {
420 srcline = get_srcline(map->dso,
421 map__rip_2objdump(map, addr), NULL,
422 true, true);
423 if (srcline != SRCLINE_UNKNOWN)
424 ret = fprintf(fp, "%s%s", prefix, srcline);
425 free_srcline(srcline);
426 }
427 return ret;
428 }
429
430 /**
431 * map__rip_2objdump - convert symbol start address to objdump address.
432 * @map: memory map
433 * @rip: symbol start address
434 *
435 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
436 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
437 * relative to section start.
438 *
439 * Return: Address suitable for passing to "objdump --start-address="
440 */
441 u64 map__rip_2objdump(struct map *map, u64 rip)
442 {
443 if (!map->dso->adjust_symbols)
444 return rip;
445
446 if (map->dso->rel)
447 return rip - map->pgoff;
448
449 /*
450 * kernel modules also have DSO_TYPE_USER in dso->kernel,
451 * but all kernel modules are ET_REL, so won't get here.
452 */
453 if (map->dso->kernel == DSO_TYPE_USER)
454 return rip + map->dso->text_offset;
455
456 return map->unmap_ip(map, rip) - map->reloc;
457 }
458
459 /**
460 * map__objdump_2mem - convert objdump address to a memory address.
461 * @map: memory map
462 * @ip: objdump address
463 *
464 * Closely related to map__rip_2objdump(), this function takes an address from
465 * objdump and converts it to a memory address. Note this assumes that @map
466 * contains the address. To be sure the result is valid, check it forwards
467 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
468 *
469 * Return: Memory address.
470 */
471 u64 map__objdump_2mem(struct map *map, u64 ip)
472 {
473 if (!map->dso->adjust_symbols)
474 return map->unmap_ip(map, ip);
475
476 if (map->dso->rel)
477 return map->unmap_ip(map, ip + map->pgoff);
478
479 /*
480 * kernel modules also have DSO_TYPE_USER in dso->kernel,
481 * but all kernel modules are ET_REL, so won't get here.
482 */
483 if (map->dso->kernel == DSO_TYPE_USER)
484 return map->unmap_ip(map, ip - map->dso->text_offset);
485
486 return ip + map->reloc;
487 }
488
489 static void maps__init(struct maps *maps)
490 {
491 maps->entries = RB_ROOT;
492 pthread_rwlock_init(&maps->lock, NULL);
493 }
494
495 void map_groups__init(struct map_groups *mg, struct machine *machine)
496 {
497 int i;
498 for (i = 0; i < MAP__NR_TYPES; ++i) {
499 maps__init(&mg->maps[i]);
500 }
501 mg->machine = machine;
502 refcount_set(&mg->refcnt, 1);
503 }
504
505 static void __maps__purge(struct maps *maps)
506 {
507 struct rb_root *root = &maps->entries;
508 struct rb_node *next = rb_first(root);
509
510 while (next) {
511 struct map *pos = rb_entry(next, struct map, rb_node);
512
513 next = rb_next(&pos->rb_node);
514 rb_erase_init(&pos->rb_node, root);
515 map__put(pos);
516 }
517 }
518
519 static void maps__exit(struct maps *maps)
520 {
521 pthread_rwlock_wrlock(&maps->lock);
522 __maps__purge(maps);
523 pthread_rwlock_unlock(&maps->lock);
524 }
525
526 void map_groups__exit(struct map_groups *mg)
527 {
528 int i;
529
530 for (i = 0; i < MAP__NR_TYPES; ++i)
531 maps__exit(&mg->maps[i]);
532 }
533
534 bool map_groups__empty(struct map_groups *mg)
535 {
536 int i;
537
538 for (i = 0; i < MAP__NR_TYPES; ++i) {
539 if (maps__first(&mg->maps[i]))
540 return false;
541 }
542
543 return true;
544 }
545
546 struct map_groups *map_groups__new(struct machine *machine)
547 {
548 struct map_groups *mg = malloc(sizeof(*mg));
549
550 if (mg != NULL)
551 map_groups__init(mg, machine);
552
553 return mg;
554 }
555
556 void map_groups__delete(struct map_groups *mg)
557 {
558 map_groups__exit(mg);
559 free(mg);
560 }
561
562 void map_groups__put(struct map_groups *mg)
563 {
564 if (mg && refcount_dec_and_test(&mg->refcnt))
565 map_groups__delete(mg);
566 }
567
568 struct symbol *map_groups__find_symbol(struct map_groups *mg,
569 enum map_type type, u64 addr,
570 struct map **mapp)
571 {
572 struct map *map = map_groups__find(mg, type, addr);
573
574 /* Ensure map is loaded before using map->map_ip */
575 if (map != NULL && map__load(map) >= 0) {
576 if (mapp != NULL)
577 *mapp = map;
578 return map__find_symbol(map, map->map_ip(map, addr));
579 }
580
581 return NULL;
582 }
583
584 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
585 struct map **mapp)
586 {
587 struct symbol *sym;
588 struct rb_node *nd;
589
590 pthread_rwlock_rdlock(&maps->lock);
591
592 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
593 struct map *pos = rb_entry(nd, struct map, rb_node);
594
595 sym = map__find_symbol_by_name(pos, name);
596
597 if (sym == NULL)
598 continue;
599 if (mapp != NULL)
600 *mapp = pos;
601 goto out;
602 }
603
604 sym = NULL;
605 out:
606 pthread_rwlock_unlock(&maps->lock);
607 return sym;
608 }
609
610 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
611 enum map_type type,
612 const char *name,
613 struct map **mapp)
614 {
615 struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp);
616
617 return sym;
618 }
619
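/*
 * Resolve an addr_map_symbol: if ams->addr falls outside ams->map, redo the
 * map lookup in its map_groups, then fill in the map-relative address and
 * the symbol.  Returns 0 if a symbol was found, -1 otherwise.
 */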
620 int map_groups__find_ams(struct addr_map_symbol *ams)
621 {
622 if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
623 if (ams->map->groups == NULL)
624 return -1;
625 ams->map = map_groups__find(ams->map->groups, ams->map->type,
626 ams->addr);
627 if (ams->map == NULL)
628 return -1;
629 }
630
631 ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
632 ams->sym = map__find_symbol(ams->map, ams->al_addr);
633
634 return ams->sym ? 0 : -1;
635 }
636
637 static size_t maps__fprintf(struct maps *maps, FILE *fp)
638 {
639 size_t printed = 0;
640 struct rb_node *nd;
641
642 pthread_rwlock_rdlock(&maps->lock);
643
644 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
645 struct map *pos = rb_entry(nd, struct map, rb_node);
646 printed += fprintf(fp, "Map:");
647 printed += map__fprintf(pos, fp);
648 if (verbose > 2) {
649 printed += dso__fprintf(pos->dso, pos->type, fp);
650 printed += fprintf(fp, "--\n");
651 }
652 }
653
654 pthread_rwlock_unlock(&maps->lock);
655
656 return printed;
657 }
658
659 size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
660 FILE *fp)
661 {
662 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
663 return printed += maps__fprintf(&mg->maps[type], fp);
664 }
665
666 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
667 {
668 size_t printed = 0, i;
669 for (i = 0; i < MAP__NR_TYPES; ++i)
670 printed += __map_groups__fprintf_maps(mg, i, fp);
671 return printed;
672 }
673
674 static void __map_groups__insert(struct map_groups *mg, struct map *map)
675 {
676 __maps__insert(&mg->maps[map->type], map);
677 map->groups = mg;
678 }
679
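/*
 * Remove every existing map that overlaps the incoming @map, inserting
 * "before"/"after" clones for the parts of the old mapping that the new one
 * does not cover.
 */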
680 static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
681 {
682 struct rb_root *root;
683 struct rb_node *next;
684 int err = 0;
685
686 pthread_rwlock_wrlock(&maps->lock);
687
688 root = &maps->entries;
689 next = rb_first(root);
690
691 while (next) {
692 struct map *pos = rb_entry(next, struct map, rb_node);
693 next = rb_next(&pos->rb_node);
694
695 if (!map__overlap(pos, map))
696 continue;
697
698 if (verbose >= 2) {
699
700 if (use_browser) {
701 pr_warning("overlapping maps in %s "
702 "(disable tui for more info)\n",
703 map->dso->name);
704 } else {
705 fputs("overlapping maps:\n", fp);
706 map__fprintf(map, fp);
707 map__fprintf(pos, fp);
708 }
709 }
710
711 rb_erase_init(&pos->rb_node, root);
712 /*
713 * Now check if we need to create new maps for areas not
714 * overlapped by the new map:
715 */
716 if (map->start > pos->start) {
717 struct map *before = map__clone(pos);
718
719 if (before == NULL) {
720 err = -ENOMEM;
721 goto put_map;
722 }
723
724 before->end = map->start;
725 __map_groups__insert(pos->groups, before);
726 if (verbose >= 2 && !use_browser)
727 map__fprintf(before, fp);
728 map__put(before);
729 }
730
731 if (map->end < pos->end) {
732 struct map *after = map__clone(pos);
733
734 if (after == NULL) {
735 err = -ENOMEM;
736 goto put_map;
737 }
738
739 after->start = map->end;
740 __map_groups__insert(pos->groups, after);
741 if (verbose >= 2 && !use_browser)
742 map__fprintf(after, fp);
743 map__put(after);
744 }
745 put_map:
746 map__put(pos);
747
748 if (err)
749 goto out;
750 }
751
752 err = 0;
753 out:
754 pthread_rwlock_unlock(&maps->lock);
755 return err;
756 }
757
758 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
759 FILE *fp)
760 {
761 return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
762 }
763
764 /*
765 * XXX This should not really _copy_ the maps, but refcount them.
766 */
767 int map_groups__clone(struct thread *thread,
768 struct map_groups *parent, enum map_type type)
769 {
770 struct map_groups *mg = thread->mg;
771 int err = -ENOMEM;
772 struct map *map;
773 struct maps *maps = &parent->maps[type];
774
775 pthread_rwlock_rdlock(&maps->lock);
776
777 for (map = maps__first(maps); map; map = map__next(map)) {
778 struct map *new = map__clone(map);
779 if (new == NULL)
780 goto out_unlock;
781
782 err = unwind__prepare_access(thread, new, NULL);
783 if (err)
784 goto out_unlock;
785
786 map_groups__insert(mg, new);
787 map__put(new);
788 }
789
790 err = 0;
791 out_unlock:
792 pthread_rwlock_unlock(&maps->lock);
793 return err;
794 }
795
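/*
 * Insert into the rb-tree ordered by map->start and grab a reference;
 * the caller must hold maps->lock for writing.
 */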
796 static void __maps__insert(struct maps *maps, struct map *map)
797 {
798 struct rb_node **p = &maps->entries.rb_node;
799 struct rb_node *parent = NULL;
800 const u64 ip = map->start;
801 struct map *m;
802
803 while (*p != NULL) {
804 parent = *p;
805 m = rb_entry(parent, struct map, rb_node);
806 if (ip < m->start)
807 p = &(*p)->rb_left;
808 else
809 p = &(*p)->rb_right;
810 }
811
812 rb_link_node(&map->rb_node, parent, p);
813 rb_insert_color(&map->rb_node, &maps->entries);
814 map__get(map);
815 }
816
817 void maps__insert(struct maps *maps, struct map *map)
818 {
819 pthread_rwlock_wrlock(&maps->lock);
820 __maps__insert(maps, map);
821 pthread_rwlock_unlock(&maps->lock);
822 }
823
824 static void __maps__remove(struct maps *maps, struct map *map)
825 {
826 rb_erase_init(&map->rb_node, &maps->entries);
827 map__put(map);
828 }
829
830 void maps__remove(struct maps *maps, struct map *map)
831 {
832 pthread_rwlock_wrlock(&maps->lock);
833 __maps__remove(maps, map);
834 pthread_rwlock_unlock(&maps->lock);
835 }
836
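/* Find the map containing @ip, i.e. the one with start <= ip < end, or NULL. */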
837 struct map *maps__find(struct maps *maps, u64 ip)
838 {
839 struct rb_node **p, *parent = NULL;
840 struct map *m;
841
842 pthread_rwlock_rdlock(&maps->lock);
843
844 p = &maps->entries.rb_node;
845 while (*p != NULL) {
846 parent = *p;
847 m = rb_entry(parent, struct map, rb_node);
848 if (ip < m->start)
849 p = &(*p)->rb_left;
850 else if (ip >= m->end)
851 p = &(*p)->rb_right;
852 else
853 goto out;
854 }
855
856 m = NULL;
857 out:
858 pthread_rwlock_unlock(&maps->lock);
859 return m;
860 }
861
862 struct map *maps__first(struct maps *maps)
863 {
864 struct rb_node *first = rb_first(&maps->entries);
865
866 if (first)
867 return rb_entry(first, struct map, rb_node);
868 return NULL;
869 }
870
871 struct map *map__next(struct map *map)
872 {
873 struct rb_node *next = rb_next(&map->rb_node);
874
875 if (next)
876 return rb_entry(next, struct map, rb_node);
877 return NULL;
878 }
879
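/*
 * For kernel DSOs map__new2() allocates a struct kmap right after the map
 * itself, so it can be reached with (map + 1).
 */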
880 struct kmap *map__kmap(struct map *map)
881 {
882 if (!map->dso || !map->dso->kernel) {
883 pr_err("Internal error: map__kmap with a non-kernel map\n");
884 return NULL;
885 }
886 return (struct kmap *)(map + 1);
887 }
888
889 struct map_groups *map__kmaps(struct map *map)
890 {
891 struct kmap *kmap = map__kmap(map);
892
893 if (!kmap || !kmap->kmaps) {
894 pr_err("Internal error: map__kmaps with a non-kernel map\n");
895 return NULL;
896 }
897 return kmap->kmaps;
898 }