// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "rwsem.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"
#include <internal/rc_check.h>

/*
 * Locking/sorting note:
 *
 * Sorting is done under the write lock; iteration and binary searching
 * happen under the read lock and require the maps to be sorted. There is
 * a race between a sorter releasing the write lock and a reader acquiring
 * the read lock where another thread could insert and break the sort
 * order. In practice inserting maps should be rare, so the race shouldn't
 * lead to livelock. Removing maps doesn't break the sort order.
 */
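
/*
 * As a rough sketch, readers below (e.g. maps__find() and
 * maps__for_each_map()) cope with that race by retrying until they
 * observe a sorted array under the read lock:
 *
 *	bool done = false;
 *
 *	while (!done) {
 *		down_read(maps__lock(maps));
 *		if (maps__maps_by_address_sorted(maps)) {
 *			... iterate or binary search ...
 *			done = true;
 *		}
 *		up_read(maps__lock(maps));
 *		if (!done)
 *			maps__sort_by_address(maps); takes the write lock
 *	}
 */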

DECLARE_RC_STRUCT(maps) {
	struct rw_semaphore lock;
	/**
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	 */
	struct map **maps_by_address;
	/**
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	 */
	struct map **maps_by_name;
	struct machine *machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	void *addr_space;
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
	refcount_t refcnt;
	/**
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 */
	unsigned int nr_maps;
	/**
	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
	 * maps_by_name.
	 */
	unsigned int nr_maps_allocated;
	/**
	 * @last_search_by_name_idx: cache of last found by name entry's index
	 * as frequent searches for the same dso name are common.
	 */
	unsigned int last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool maps_by_name_sorted;
	/** @ends_broken: true if the maps contain a map whose end is unset or out of order. */
	bool ends_broken;
};

static void check_invariants(const struct maps *maps __maybe_unused)
{
#ifndef NDEBUG
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);

		if (map__dso(map) && dso__kernel(map__dso(map)))
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));

		if (i > 0) {
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 * too.
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
				}
			}
		}
	}
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps by name maps should be in maps_by_address, so
			 * the reference count should be higher.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
		}
	}
#endif
}

static struct map **maps__maps_by_address(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address;
}

static void maps__set_maps_by_address(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_address = new;
}

static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
	RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
}

static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
{
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
}

/* Not in the header, to aid reference counting. */
static struct map **maps__maps_by_name(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;
}

static void maps__set_maps_by_name(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_name = new;
}

static bool maps__maps_by_address_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
}

static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
}

static bool maps__maps_by_name_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
}

static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
}

struct machine *maps__machine(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->machine;
}

unsigned int maps__nr_maps(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->nr_maps;
}

refcount_t *maps__refcnt(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->refcnt;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
void *maps__addr_space(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->addr_space;
}

void maps__set_addr_space(struct maps *maps, void *addr_space)
{
	RC_CHK_ACCESS(maps)->addr_space = addr_space;
}

const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
}

void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
{
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}
#endif

static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}

static void maps__init(struct maps *maps, struct machine *machine)
{
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->maps_by_address = NULL;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
	RC_CHK_ACCESS(maps)->machine = machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	RC_CHK_ACCESS(maps)->addr_space = NULL;
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
#endif
	refcount_set(maps__refcnt(maps), 1);
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
	RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
}

static void maps__exit(struct maps *maps)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);

	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		map__zput(maps_by_address[i]);
		if (maps_by_name)
			map__zput(maps_by_name[i]);
	}
	zfree(&maps_by_address);
	zfree(&maps_by_name);
	unwind__finish_access(maps);
}

struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}

static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}
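
/*
 * A minimal usage sketch of the reference counting above, assuming a
 * caller that already has a struct machine (error handling elided):
 *
 *	struct maps *maps = maps__new(machine);	refcount == 1
 *	struct maps *ref = maps__get(maps);	refcount == 2
 *	...
 *	maps__put(ref);				refcount == 1
 *	maps__put(maps);			refcount == 0, freed
 */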

static void __maps__free_maps_by_name(struct maps *maps)
{
	if (!maps__maps_by_name(maps))
		return;

	/*
	 * Drop all references and free the by-name array; it will be
	 * rebuilt the next time a by-name search needs it.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);

	/* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
	maps__set_maps_by_name_sorted(maps, false);
}

static int map__start_cmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	u64 map_a_start = map__start(map_a);
	u64 map_b_start = map__start(map_b);

	if (map_a_start == map_b_start) {
		u64 map_a_end = map__end(map_a);
		u64 map_b_end = map__end(map_b);

		if (map_a_end == map_b_end) {
			/* Ensure maps with the same addresses have a fixed order. */
			if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
				return 0;
			return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
			       ? 1 : -1;
		}
		return map_a_end > map_b_end ? 1 : -1;
	}
	return map_a_start > map_b_start ? 1 : -1;
}

static void __maps__sort_by_address(struct maps *maps)
{
	if (maps__maps_by_address_sorted(maps))
		return;

	qsort(maps__maps_by_address(maps),
	      maps__nr_maps(maps),
	      sizeof(struct map *),
	      map__start_cmp);
	maps__set_maps_by_address_sorted(maps, true);
}

static void maps__sort_by_address(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
}

static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));

	if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
		/* Ensure distinct but name equal maps have an order. */
		return map__start_cmp(a, b);
	}
	return ret;
}

static int maps__sort_by_name(struct maps *maps)
{
	int err = 0;

	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		if (!maps_by_name) {
			maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
					      sizeof(*maps_by_name));
			if (!maps_by_name)
				err = -ENOMEM;
			else {
				struct map **maps_by_address = maps__maps_by_address(maps);
				unsigned int n = maps__nr_maps(maps);

				maps__set_maps_by_name(maps, maps_by_name);
				for (unsigned int i = 0; i < n; i++)
					maps_by_name[i] = map__get(maps_by_address[i]);
			}
		}
		if (!err) {
			qsort(maps_by_name,
			      maps__nr_maps(maps),
			      sizeof(struct map *),
			      map__strcmp);
			maps__set_maps_by_name_sorted(maps, true);
		}
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
	return err;
}

static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);

	if (maps__maps_by_address_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__start_cmp);

		if (mapp)
			return mapp - maps_by_address;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps\n");
	return -1;
}

static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_name = maps__maps_by_name(maps);

	if (maps__maps_by_name_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps_by_name, maps__nr_maps(maps),
				sizeof(*mapp), map__strcmp);

		if (mapp)
			return mapp - maps_by_name;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps\n");
	return -1;
}

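/*
 * Append new at the end of the maps arrays, growing the backing storage
 * geometrically (32 entries to start, doubling thereafter) and updating
 * the sorted flags. The caller must hold the write lock.
 */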
static int __maps__insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	const struct dso *dso = map__dso(new);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
			if (!maps_by_name) {
				/*
				 * If the by-name array can't be resized, just
				 * disable it; it will be recomputed the next
				 * time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new);

	nr_maps++;
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;

	/*
	 * Recompute whether things are sorted. If maps are inserted in a
	 * sorted manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
	if (nr_maps == 1) {
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
	} else {
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	}
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;
	if (dso && dso__kernel(dso)) {
		struct kmap *kmap = map__kmap(new);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}
	return 0;
}

int maps__insert(struct maps *maps, struct map *map)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
	return ret;
}

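/*
 * Remove map from both arrays by sliding later entries down over it. The
 * arrays stay sorted, so removal never breaks the sort order (see the
 * locking/sorting note). The caller must hold the write lock.
 */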
static void __maps__remove(struct maps *maps, struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));

	if (maps_by_name) {
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) * sizeof(*maps_by_name));
	}

	--RC_CHK_ACCESS(maps)->nr_maps;
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	bool res;

	down_read(maps__lock(maps));
	res = maps__nr_maps(maps) == 0;
	up_read(maps__lock(maps));

	return res;
}

bool maps__equal(struct maps *a, struct maps *b)
{
	return RC_CHK_EQUAL(a, b);
}

int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	bool done = false;
	int ret = 0;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/*
			 * maps__for_each_map callbacks may buggily/unsafely
			 * insert into maps_by_address. Deliberately reload
			 * maps__nr_maps and maps_by_address on each iteration
			 * to avoid using memory freed by maps__insert growing
			 * the array - this may cause maps to be skipped or
			 * repeated.
			 */
			for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
				struct map **maps_by_address = maps__maps_by_address(maps);
				struct map *map = maps_by_address[i];

				ret = cb(map, data);
				if (ret)
					break;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return ret;
}
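
/*
 * A hypothetical callback, to illustrate the contract above: returning
 * non-zero stops the iteration and becomes maps__for_each_map()'s return
 * value, returning zero continues it:
 *
 *	static int count_maps_cb(struct map *map __maybe_unused, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *
 *	maps__for_each_map(maps, count_maps_cb, &nr);
 */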

void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map **maps_by_address;

	down_write(maps__lock(maps));

	maps_by_address = maps__maps_by_address(maps);
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		if (cb(maps_by_address[i], data))
			__maps__remove(maps, maps_by_address[i]);
		else
			i++;
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
}

struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);
	struct symbol *result = NULL;

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0)
		result = map__find_symbol(map, map__map_ip(map, addr));

	if (mapp)
		*mapp = map;
	else
		map__put(map);

	return result;
}

struct maps__find_symbol_by_name_args {
	struct map **mapp;
	const char *name;
	struct symbol *sym;
};

static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
	return args.sym;
}

int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_cb(struct map *map, void *data)
{
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	if (verbose > 2) {
		args->printed += dso__fprintf(map__dso(map), args->fp);
		args->printed += fprintf(args->fp, "--\n");
	}
	return 0;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}

/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;

	assert(maps__maps_by_address_sorted(maps));
	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
		return 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) > map__start(map)) {
			first = mid;
			if (map__start(pos) <= map__start(map)) {
				/* Entry overlaps map. */
				break;
			}
			high = mid - 1;
		} else
			low = mid + 1;
	}
	return first;
}

/*
 * Adds new to maps. If new overlaps existing entries, the existing maps
 * are adjusted or removed so that new fits without overlapping any
 * entries.
 */
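/*
 * For illustration (addresses are made up), inserting new into the middle
 * of an existing map clones the existing map into `before` and `after`
 * pieces around it:
 *
 *	pos:	0x1000 |--------------------------| 0x4000
 *	new:	         0x2000 |--------| 0x3000
 *
 *	result:	|before|------new-------|--after--|
 *		0x1000  0x2000    0x3000     0x4000
 */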
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address;
	int err = 0;
	FILE *fp = debug_file();

sort_again:
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	maps_by_address = maps__maps_by_address(maps);
	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater-than the new map's start.
	 */
	for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if the current map starts at or after new's end.
		 * Maps are ordered by start: later ones cannot overlap.
		 */
		if (map__start(pos) >= map__end(new))
			break;

		if (use_browser) {
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				 dso__name(map__dso(new)));
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		}

		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto out_err;
			}
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		}
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);

			if (after == NULL) {
				map__zput(before);
				err = -ENOMEM;
				goto out_err;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));
			assert(map__map_ip(pos, map__end(new)) ==
			       map__map_ip(after, map__end(new)));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		}
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary then an insert is needed. If the new map entirely
		 * overlaps the existing entry it can just be removed.
		 */
		if (before) {
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;
			/* Maps are still ordered, go to next one. */
			i++;
			if (after) {
				__maps__insert(maps, after);
				map__put(after);
				if (!maps__maps_by_address_sorted(maps)) {
					/*
					 * Sorting broken so invariants don't
					 * hold, sort and go again.
					 */
					goto sort_again;
				}
				/*
				 * Maps are still ordered, skip after and go to
				 * next one (terminate loop).
				 */
				i++;
			}
		} else if (after) {
			map__put(maps_by_address[i]);
			maps_by_address[i] = after;
			/* Maps are ordered, go to next one. */
			i++;
		} else {
			__maps__remove(maps, pos);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		}
		check_invariants(maps);
	}
	/* Add the map. */
	err = __maps__insert(maps, new);
out_err:
	return err;
}

int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err;

	down_write(maps__lock(maps));
	err = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
	return err;
}

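/*
 * Copy parent's maps into dest, cloning each map. When dest is empty the
 * arrays are copied wholesale to avoid reallocations in insert; copying
 * into a non-empty dest falls back to inserting one map at a time.
 */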
int maps__copy_from(struct maps *dest, struct maps *parent)
{
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;
	int err = 0;
	unsigned int n;

	down_write(maps__lock(dest));
	down_read(maps__lock(parent));

	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			err = -ENOMEM;
		else {
			if (maps__maps_by_name(parent)) {
				dest_maps_by_name =
					malloc(nr_maps_allocated * sizeof(struct map *));
			}

			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		}

		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err) {
					dest_maps_by_address[i] = new;
					if (dest_maps_by_name)
						dest_maps_by_name[i] = map__get(new);
					RC_CHK_ACCESS(dest)->nr_maps = i + 1;
				}
			}
			if (err)
				map__put(new);
		}
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
		if (!err) {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						      dest_maps_by_name &&
						      maps__maps_by_name_sorted(parent));
		} else {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		}
	} else {
		/* Unexpected case: copying into a maps that already contains entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err)
					err = __maps__insert(dest, new);
			}
			map__put(new);
		}
	}
	check_invariants(dest);

	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
	return err;
}

static int map__addr_cmp(const void *key, const void *entry)
{
	const u64 ip = *(const u64 *)key;
	const struct map *map = *(const struct map * const *)entry;

	if (ip < map__start(map))
		return -1;
	if (ip >= map__end(map))
		return 1;
	return 0;
}

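/*
 * Find the map containing ip, if any, taking a reference on the result.
 * The lookup is a binary search over the address-sorted array; see the
 * locking/sorting note at the top of the file for why it may retry.
 */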
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			struct map **mapp =
				bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__addr_cmp);

			if (mapp)
				result = map__get(*mapp);
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return result;
}

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso__short_name(dso));
}

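/*
 * Look up a map by dso short name in three stages: first the cached index
 * of the last successful by-name search, then a binary search of the
 * name-sorted array, and finally, if allocating that array failed, a
 * linear scan of the address-sorted array.
 */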
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		unsigned int i;

		down_read(maps__lock(maps));

		/* First check last found entry. */
		i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
		if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
			struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

			if (dso && strcmp(dso__short_name(dso), name) == 0) {
				result = map__get(maps__maps_by_name(maps)[i]);
				done = true;
			}
		}

		/* Second, search the sorted array. */
		if (!done && maps__maps_by_name_sorted(maps)) {
			struct map **mapp =
				bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__strcmp_name);

			if (mapp) {
				result = map__get(*mapp);
				i = mapp - maps__maps_by_name(maps);
				RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done) {
			/* Sort and retry the binary search. */
			if (maps__sort_by_name(maps)) {
				/*
				 * Memory allocation failed; do a linear search
				 * through the address-sorted maps.
				 */
				struct map **maps_by_address;
				unsigned int n;

				down_read(maps__lock(maps));
				maps_by_address = maps__maps_by_address(maps);
				n = maps__nr_maps(maps);
				for (i = 0; i < n; i++) {
					struct map *pos = maps_by_address[i];
					struct dso *dso = map__dso(pos);

					if (dso && strcmp(dso__short_name(dso), name) == 0) {
						result = map__get(pos);
						break;
					}
				}
				up_read(maps__lock(maps));
				done = true;
			}
		}
	}
	return result;
}

struct map *maps__find_next_entry(struct maps *maps, struct map *map)
{
	unsigned int i;
	struct map *result = NULL;

	down_read(maps__lock(maps));
	i = maps__by_address_index(maps, map);
	/* Return the entry following map, not map itself. */
	if (i + 1 < maps__nr_maps(maps))
		result = map__get(maps__maps_by_address(maps)[i + 1]);

	up_read(maps__lock(maps));
	return result;
}

void maps__fixup_end(struct maps *maps)
{
	struct map **maps_by_address;
	unsigned int n;

	down_write(maps__lock(maps));
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	maps_by_address = maps__maps_by_address(maps);
	n = maps__nr_maps(maps);
	for (unsigned int i = 1; i < n; i++) {
		struct map *prev = maps_by_address[i - 1];
		struct map *curr = maps_by_address[i];

		if (!map__end(prev) || map__end(prev) > map__start(curr))
			map__set_end(prev, map__start(curr));
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's end address.
	 */
	if (n > 0 && !map__end(maps_by_address[n - 1]))
		map__set_end(maps_by_address[n - 1], ~0ULL);

	RC_CHK_ACCESS(maps)->ends_broken = false;
	check_invariants(maps);

	up_write(maps__lock(maps));
}

/*
 * Merges new_map into kmaps by splitting new_map around the existing map
 * regions.
 */
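/*
 * For example (the typical kcore case, not a guarantee of this function's
 * only use), when a large kernel text map is merged into kmaps that
 * already contain module maps, the new map is the one that gets split so
 * the existing module maps keep their ranges:
 *
 *	kmaps:   ...|-module A-|......|-module B-|...
 *	new_map: |--------------- kernel text ----------------|
 *	result:  |--|-module A-|------|-module B-|------------|
 */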
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	unsigned int first_after_, kmaps__nr_maps;
	struct map **kmaps_maps_by_address;
	struct map **merged_maps_by_address;
	unsigned int merged_nr_maps_allocated;

	/* First try under a read lock. */
	while (true) {
		down_read(maps__lock(kmaps));
		if (maps__maps_by_address_sorted(kmaps))
			break;

		up_read(maps__lock(kmaps));

		/* First after binary search requires sorted maps. Sort and try again. */
		maps__sort_by_address(kmaps);
	}
	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);

	if (first_after_ >= maps__nr_maps(kmaps) ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		up_read(maps__lock(kmaps));
		return maps__insert(kmaps, new_map);
	}
	up_read(maps__lock(kmaps));

	/* Plain insert with a read-lock failed, try again now with the write lock. */
	down_write(maps__lock(kmaps));
	if (!maps__maps_by_address_sorted(kmaps))
		__maps__sort_by_address(kmaps);

	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);
	kmaps__nr_maps = maps__nr_maps(kmaps);

	if (first_after_ >= kmaps__nr_maps ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		int ret = __maps__insert(kmaps, new_map);

		check_invariants(kmaps);
		up_write(maps__lock(kmaps));
		return ret;
	}
	/* Array to merge into, possibly 1 more for the sake of new_map. */
	merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
	if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
		merged_nr_maps_allocated++;

	merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
	if (!merged_maps_by_address) {
		up_write(maps__lock(kmaps));
		return -ENOMEM;
	}
	maps__set_maps_by_address(kmaps, merged_maps_by_address);
	maps__set_maps_by_address_sorted(kmaps, true);
	__maps__free_maps_by_name(kmaps);
	maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);

	/* Copy entries before the new_map that can't overlap. */
	for (unsigned int i = 0; i < first_after_; i++)
		merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);

	maps__set_nr_maps(kmaps, first_after_);

	/* Add the new map, it will be split when the later overlapping mappings are added. */
	__maps__insert(kmaps, new_map);

	/* Insert mappings after new_map, splitting new_map in the process. */
	for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
		__maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);

	/* Release the old array's references and free it. */
	for (unsigned int i = 0; i < kmaps__nr_maps; i++)
		map__zput(kmaps_maps_by_address[i]);

	free(kmaps_maps_by_address);
	check_invariants(kmaps);
	up_write(maps__lock(kmaps));
	return 0;
}

void maps__load_first(struct maps *maps)
{
	down_read(maps__lock(maps));

	if (maps__nr_maps(maps) > 0)
		map__load(maps__maps_by_address(maps)[0]);

	up_read(maps__lock(maps));
}