// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#endif
#include "compress.h"
#include "env.h"
#include "namespaces.h"
#include "path.h"
#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#include "dsos.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h" /* O_CLOEXEC for older systems */
#include "debug.h"
#include "string2.h"
#include "vdso.h"

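/*
 * Candidate locations for a .gnu_debuglink target, tried in order by
 * dso__read_binary_type_filename(): the debuglink name as given (the
 * "%.0s%s" format consumes the directory argument without printing it),
 * the binary's own directory, its .debug/ subdirectory, and the
 * corresponding path under /usr/lib/debug.
 */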
static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS] = 'k',
		[DSO_BINARY_TYPE__VMLINUX] = 'v',
		[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
		[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

bool dso__is_object_file(const struct dso *dso)
{
	switch (dso->binary_type) {
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
		return false;
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return true;
	}
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash-filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib.
		 */
		if (strlen(dso->long_name) < 9 ||
		    strncmp(dso->long_name, "/usr/lib/", 9)) {
			ret = -1;
			break;
		}
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(&dso->bid, build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}

enum {
	COMP_ID__NONE = 0,
};

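/*
 * Table of supported decompression backends, indexed by the compression
 * id stored in dso->comp / kmod_path::comp. Entry 0 (COMP_ID__NONE) is a
 * placeholder, and the NULL-terminated tail lets is_supported_compression()
 * walk the table starting at index 1.
 */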
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
	bool (*is_compressed)(const char *input);
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};

static int is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 1; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return i;
	}
	return COMP_ID__NONE;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

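/*
 * Decompress @name with the @comp backend into a temporary file and
 * return an open fd to it. On failure, -1 is returned and *err is set.
 * When @pathname is non-NULL and the fd is valid, the temporary path is
 * copied there; otherwise the temporary file is unlinked right away so
 * it disappears once the fd is closed.
 */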
int filename__decompress(const char *name, char *pathname,
			 size_t len, int comp, int *err)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd = -1;

	/*
	 * We have a proper compression id for the DSO, and yet the file
	 * behind 'name' can still be a plain uncompressed object.
	 *
	 * The reason lies in the way we open DSO object files: we try
	 * all possible 'debug' objects until we find the data. So even
	 * if the DSO is represented by a 'krava.xz' module, we can end
	 * up here opening a '~/.debug/....23432432/debug' file, which
	 * is not compressed.
	 *
	 * To keep this transparent, we detect this and return the file
	 * descriptor to the uncompressed file.
	 */
	if (!compressions[comp].is_compressed(name))
		return open(name, O_RDONLY);

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		*err = errno;
		return -1;
	}

	if (compressions[comp].decompress(name, fd)) {
		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	if (!pathname || (fd < 0))
		unlink(tmpbuf);

	if (pathname && (fd >= 0))
		strlcpy(pathname, tmpbuf, len);

	return fd;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      char *pathname, size_t len)
{
	if (!dso__needs_decompress(dso))
		return -1;

	if (dso->comp == COMP_ID__NONE)
		return -1;

	return filename__decompress(name, pathname, len, dso->comp,
				    &dso->load_errno);
}

int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	return decompress_kmodule(dso, name, NULL, 0);
}

int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	int fd = decompress_kmodule(dso, name, pathname, len);

	close(fd);
	return fd >= 0 ? 0 : -1;
}

/*
 * Parses the kernel module specified in @path and updates
 * the @m argument like:
 *
 * @comp - supported compression id if @path has a known compression
 *         suffix, COMP_ID__NONE otherwise
 * @kmod - true if @path contains a '.ko' suffix in the right position,
 *         false otherwise
 * @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
 *         base name of the kernel module without suffixes, otherwise
 *         the strdup-ed base name of @path
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for a module name. For example,
	 * [aaa.bbb] is a valid module name, so '[' should take higher
	 * priority than the '.ko' suffix.
	 *
	 * The bracketed kernel names come from machine__mmap_name. Such
	 * a name belongs to the kernel itself, not to a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		ext -= 3;

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strreplace(m->name, '-', '_');
	}

	return 0;
}
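
/*
 * Example (a sketch, not exhaustive): assuming zlib support is built in,
 * parsing "/lib/modules/.../usb-storage.ko.gz" with alloc_name=true yields
 * m->kmod = true, m->comp set to the "gz" entry of compressions[], and
 * m->name = "[usb_storage]" (brackets added, '-' replaced by '_'), while
 * "[vdso]" is recognized as a simple name with m->kmod = false.
 */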

void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp) {
		dso->symtab_type++;
		dso->comp = m->comp;
	}

	dso->is_kmod = 1;
	dso__set_short_name(dso, strdup(m->name), true);
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del_init(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

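/*
 * Open @name read-only. If we run into the per-process file descriptor
 * limit (EMFILE) while other DSO fds are cached, close the least recently
 * opened one and retry.
 */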
static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY|O_CLOEXEC);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
{
	return filename_with_chroot(nsinfo__pid(dso->nsinfo), filename);
}

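/*
 * Resolve the on-disk filename for @dso's binary type, falling back to a
 * path inside the process's chroot (via its namespace info) if the plain
 * path does not exist. Compressed kernel modules are decompressed into a
 * temporary file, which is unlinked again once it has been opened.
 */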
static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);
	bool decomp = false;

	if (!name)
		return -ENOMEM;

	mutex_lock(&dso->lock);
	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name)) {
		char *new_name;

		if (errno != ENOENT || dso->nsinfo == NULL)
			goto out;

		new_name = dso__filename_with_chroot(dso, name);
		if (!new_name)
			goto out;

		free(name);
		name = new_name;
	}

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		decomp = true;
		strcpy(name, newpath);
	}

	fd = do_open(name);

	if (decomp)
		unlink(name);

out:
	mutex_unlock(&dso->lock);
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Opens @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
		mutex_lock(&dso->lock);
		nsinfo__mountns_enter(dso->nsinfo, &nsc);
		mutex_unlock(&dso->lock);
	}
	fd = __open_dso(dso, machine);
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Closes @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should need to change this
 * during standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close the LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half of the
 * current RLIMIT_NOFILE limit.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find the dso's file, open it and
 * return its file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}

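/*
 * Record that @dso's data status has been seen (e.g. reported) for the
 * reason @by: returns true if it was already seen, false on the first
 * call, so callers can warn only once per dso and reason.
 */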
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

#ifdef HAVE_LIBBPF_SUPPORT
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}

static int bpf_size(struct dso *dso)
{
	struct bpf_prog_info_node *node;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	dso->data.file_size = node->info_linear->info.jited_prog_len;
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	mutex_unlock(&dso->lock);
}

static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

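/*
 * Insert @new into the dso data cache tree. Returns NULL when the node was
 * inserted, or the already-present cache entry covering the same offset
 * when another thread won the race, in which case the caller frees @new.
 */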
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
				 u64 size, bool out)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	if (out)
		memcpy(data, cache->data + cache_offset, cache_size);
	else
		memcpy(cache->data + cache_offset, data, cache_size);
	return cache_size;
}

static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

static struct dso_cache *dso_cache__populate(struct dso *dso,
					     struct machine *machine,
					     u64 offset, ssize_t *ret)
{
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;

	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache) {
		*ret = -ENOMEM;
		return NULL;
	}
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		*ret = bpf_read(dso, cache_offset, cache->data);
	else
#endif
	if (dso->binary_type == DSO_BINARY_TYPE__OOL)
		*ret = DSO__DATA_CACHE_SIZE;
	else
		*ret = file_read(dso, machine, cache_offset, cache->data);

	if (*ret <= 0) {
		free(cache);
		return NULL;
	}

	cache->offset = cache_offset;
	cache->size = *ret;

	old = dso_cache__insert(dso, cache);
	if (old) {
		/* we lose the race */
		free(cache);
		cache = old;
	}

	return cache;
}

static struct dso_cache *dso_cache__find(struct dso *dso,
					 struct machine *machine,
					 u64 offset,
					 ssize_t *ret)
{
	struct dso_cache *cache = __dso_cache__find(dso, offset);

	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}

static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
			    u64 offset, u8 *data, ssize_t size, bool out)
{
	struct dso_cache *cache;
	ssize_t ret = 0;

	cache = dso_cache__find(dso, machine, offset, &ret);
	if (!cache)
		return ret;

	return dso_cache__memcpy(cache, offset, data, size, out);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 * in the rb_tree. Any read of already cached data is served
 * from the cache. Writes update the cache only, not the backing file.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
			 u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_io(dso, machine, offset, p, size, out);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}

static int file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

int dso__data_file_size(struct dso *dso, struct machine *machine)
{
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return bpf_size(dso);
#endif
	return file_size(dso, machine);
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* For now, estimate that the dso data size is close to the file size */
	return dso->data.file_size;
}

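/*
 * Common helper for cached reads and cache writes: validates @offset
 * against the file size and guards against @offset + @size overflow
 * before going through the chunk cache.
 */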
static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
				      u64 offset, u8 *data, ssize_t size,
				      bool out)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_io(dso, machine, offset, data, size, out);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses cached_io() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, true);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map used to translate @addr into a dso-relative offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map__map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_write_cache_offs - Write data to dso data cache at file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * Write into the dso file data cache, but do not change the file itself.
 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
				   u64 offset, const u8 *data_in, ssize_t size)
{
	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, false);
}

/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: map used to translate @addr into a dso-relative offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
				   struct machine *machine, u64 addr,
				   const u8 *data, ssize_t size)
{
	u64 offset = map__map_ip(map, addr);

	return dso__data_write_cache_offs(dso, machine, offset, data, size);
}

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso) {
		map = map__new2(0, dso);
		dso__put(dso);
	}

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
		 * add it back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	dso__set_long_name_id(dso, name, NULL, name_allocated);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose > 0)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso)
{
	return dso->loaded;
}

bool dso__sorted_by_name(const struct dso *dso)
{
	return dso->sorted_by_name;
}

void dso__set_sorted_by_name(struct dso *dso)
{
	dso->sorted_by_name = true;
}

struct dso *dso__new_id(const char *name, struct dso_id *id)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		strcpy(dso->name, name);
		if (id)
			dso->id = *id;
		dso__set_long_name_id(dso, dso->name, id, false);
		dso__set_short_name(dso, dso->name, false);
		dso->symbols = RB_ROOT_CACHED;
		dso->symbol_names = NULL;
		dso->symbol_names_len = 0;
		dso->data.cache = RB_ROOT;
		dso->inlined_nodes = RB_ROOT_CACHED;
		dso->srclines = RB_ROOT_CACHED;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_SPACE__USER;
		dso->is_kmod = 0;
		dso->needs_swap = DSO_SWAP__UNSET;
		dso->comp = COMP_ID__NONE;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		mutex_init(&dso->lock);
		refcount_set(&dso->refcnt, 1);
	}

	return dso;
}

struct dso *dso__new(const char *name)
{
	return dso__new_id(name, NULL);
}

void dso__delete(struct dso *dso)
{
	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&dso->inlined_nodes);
	srcline__tree_delete(&dso->srclines);
	symbols__delete(&dso->symbols);
	dso->symbol_names_len = 0;
	zfree(&dso->symbol_names);
	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	nsinfo__zput(dso->nsinfo);
	mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		refcount_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && refcount_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, struct build_id *bid)
{
	dso->bid = *bid;
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
{
	if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
		/*
		 * For backward compatibility, a build-id is allowed to have
		 * trailing zeros.
		 */
		return !memcmp(dso->bid.data, bid->data, bid->size) &&
			!memchr_inv(&dso->bid.data[bid->size], 0,
				    dso->bid.size - bid->size);
	}

	return dso->bid.size == bid->size &&
	       memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, &dso->bid) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, &dso->bid) == 0)
		dso->has_build_id = true;

	return 0;
}

static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(&dso->bid, sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = str_error_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}