#include <asm/bug.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "symbol.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"
#include "vdso.h"

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;

		ret = -1;
		if (!is_regular_file(filename))
			break;

		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		}
		break;
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

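		/*
		 * Illustrative example (not in the original source): for a
		 * hypothetical build-id "abcdef1234...", the path built below
		 * becomes
		 *
		 *   <symfs>/usr/lib/debug/.build-id/ab/cdef1234....debug
		 *
		 * i.e. the first two hex characters name the directory and
		 * the remaining characters name the .debug file.
		 */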
		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

/*
 * Parses the kernel module specified in @path and updates
 * the @m argument as follows:
 *
 *    @comp - true if @path contains a supported compression suffix,
 *	      false otherwise
 *    @kmod - true if @path contains a '.ko' suffix in the right position,
 *	      false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed base
 *	      name of the kernel module without suffixes, otherwise the
 *	      strdup-ed base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains the strdup-ed
 *	      compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
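/*
 * Example (illustrative, not part of the original file): parsing a
 * hypothetical compressed module path with both strings requested,
 * assuming gzip support is compiled in so "gz" is a supported suffix:
 *
 *	struct kmod_path m;
 *
 *	__kmod_path__parse(&m, "/lib/modules/4.8.0/kernel/fs/xfs/xfs.ko.gz",
 *			   true, true);
 *	// m.kmod == true, m.comp == true,
 *	// m.name == "[xfs]", m.ext == "gz"
 *	free((void *) m.name);
 *	free((void *) m.ext);
 */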
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * a name should belong to the kernel itself, not a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX)) {
		free(name);
		return -EINVAL;
	}

	fd = do_open(name);
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close the LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find the dso's file, open it and
 * return its file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
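/*
 * Illustrative usage (not part of the original file), assuming the caller
 * already holds references to a valid dso and machine:
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		// ... use fd, e.g. pass it to dso__type_fd() ...
 *		dso__data_put_fd(dso);
 *	}
 *
 * The lock taken by dso__data_get_fd() is released only by
 * dso__data_put_fd() (or internally when the open fails), so the two calls
 * must stay paired and the fd must not be used after the put, since another
 * thread may then close it to stay under the fd limit.
 */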
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}

static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if another thread opened another
		 * file (dso) due to the open file limit (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}

static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 * in the rb_tree. Any read of already cached data is served
 * from the cache.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}

static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses cached_read to get the data.
 */
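/*
 * Illustrative example (not part of the original file): reading the ELF
 * magic of a hypothetical dso whose file can be found on disk:
 *
 *	u8 buf[4];
 *
 *	if (dso__data_read_offset(dso, machine, 0, buf, sizeof(buf)) == 4) {
 *		// buf now holds the first four bytes of the file, served
 *		// from a DSO__DATA_CACHE_SIZE-sized chunk in the cache tree.
 *	}
 *
 * dso__data_read_addr() below is the address-based variant: it converts a
 * mapped virtual address to a file offset via map->map_ip() and then calls
 * dso__data_read_offset().
 */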
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_read_offset(dso, machine, offset, data, size);
}

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

/*
 * Find a matching entry and/or link the current entry to the RB tree.
 * Either the dso or the name parameter must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Found matching dso */
			/*
			 * The core kernel DSOs may have duplicated long names.
			 * In this case, the short name should be different.
			 * Compare the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}

static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}

struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;
		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}

void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		atomic_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && atomic_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

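/*
 * Illustrative example (not part of the original file): for a module dso
 * whose short name is "[e1000e]" and an empty @root_dir, the sysfs path
 * built by dso__kernel_module_get_build_id() below is
 *
 *   /sys/module/e1000e/notes/.note.gnu.build-id
 *
 * which is where the kernel exposes a loaded module's build-id note.
 */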
int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit && !dso__is_vdso(pos))
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id = true;
			pos->has_build_id = true;
		}
	}

	return have_build_id;
}

void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, grab a reference, then garbage collect
	 * this when needing memory, by looking at LRU dso instances in the
	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list, do, under a lock for the
	 * list: remove it from the list, then a dso__put(), that probably will
	 * be the last and will then call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}

void dsos__add(struct dsos *dsos, struct dso *dso)
{
	pthread_rwlock_wrlock(&dsos->lock);
	__dsos__add(dsos, dso);
	pthread_rwlock_unlock(&dsos->lock);
}

struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}

struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;
	pthread_rwlock_rdlock(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
		/* Put dso here because __dsos__add already got it */
		dso__put(dso);
	}
	return dso;
}

struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}

struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;
	pthread_rwlock_wrlock(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
			       bool (skip)(struct dso *dso, int parm), int parm)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if (skip && skip(pos, parm))
			continue;
		ret += dso__fprintf_buildid(pos, fp);
		ret += fprintf(fp, " %s\n", pos->long_name);
	}
	return ret;
}

size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;
		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}

size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
		       dso__loaded(dso, type) ? "" : "NOT ");
"" : "NOT "); 1328 ret += dso__fprintf_buildid(dso, fp); 1329 ret += fprintf(fp, ")\n"); 1330 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { 1331 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 1332 ret += symbol__fprintf(pos, fp); 1333 } 1334 1335 return ret; 1336} 1337 1338enum dso_type dso__type(struct dso *dso, struct machine *machine) 1339{ 1340 int fd; 1341 enum dso_type type = DSO__TYPE_UNKNOWN; 1342 1343 fd = dso__data_get_fd(dso, machine); 1344 if (fd >= 0) { 1345 type = dso__type_fd(fd); 1346 dso__data_put_fd(dso); 1347 } 1348 1349 return type; 1350} 1351 1352int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) 1353{ 1354 int idx, errnum = dso->load_errno; 1355 /* 1356 * This must have a same ordering as the enum dso_load_errno. 1357 */ 1358 static const char *dso_load__error_str[] = { 1359 "Internal tools/perf/ library error", 1360 "Invalid ELF file", 1361 "Can not read build id", 1362 "Mismatching build id", 1363 "Decompression failure", 1364 }; 1365 1366 BUG_ON(buflen == 0); 1367 1368 if (errnum >= 0) { 1369 const char *err = str_error_r(errnum, buf, buflen); 1370 1371 if (err != buf) 1372 scnprintf(buf, buflen, "%s", err); 1373 1374 return 0; 1375 } 1376 1377 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END) 1378 return -1; 1379 1380 idx = errnum - __DSO_LOAD_ERRNO__START; 1381 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]); 1382 return 0; 1383}