The open source OpenXR runtime
at prediction-2 7491 lines 193 kB view raw
1/* elf.c -- Get debug data from an ELF file for backtraces. 2 Copyright (C) 2012-2021 Free Software Foundation, Inc. 3 Written by Ian Lance Taylor, Google. 4 5Redistribution and use in source and binary forms, with or without 6modification, are permitted provided that the following conditions are 7met: 8 9 (1) Redistributions of source code must retain the above copyright 10 notice, this list of conditions and the following disclaimer. 11 12 (2) Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in 14 the documentation and/or other materials provided with the 15 distribution. 16 17 (3) The name of the author may not be used to 18 endorse or promote products derived from this software without 19 specific prior written permission. 20 21THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 22IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 23WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 25INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 26(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 29STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 30IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31POSSIBILITY OF SUCH DAMAGE. 
*/ 32 33#include "config.h" 34 35#include <errno.h> 36#include <stdlib.h> 37#include <string.h> 38#include <sys/types.h> 39#include <sys/stat.h> 40#include <unistd.h> 41 42#ifdef HAVE_DL_ITERATE_PHDR 43#include <link.h> 44#endif 45 46#include "backtrace.hpp" 47#include "internal.hpp" 48 49#include "../client/TracyFastVector.hpp" 50#include "../common/TracyAlloc.hpp" 51 52#ifndef S_ISLNK 53 #ifndef S_IFLNK 54 #define S_IFLNK 0120000 55 #endif 56 #ifndef S_IFMT 57 #define S_IFMT 0170000 58 #endif 59 #define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK) 60#endif 61 62#ifndef __GNUC__ 63#define __builtin_prefetch(p, r, l) 64#ifndef unlikely 65#define unlikely(x) (x) 66#endif 67#else 68#ifndef unlikely 69#define unlikely(x) __builtin_expect(!!(x), 0) 70#endif 71#endif 72 73namespace tracy 74{ 75 76#ifdef TRACY_DEBUGINFOD 77int GetDebugInfoDescriptor( const char* buildid_data, size_t buildid_size ); 78#endif 79 80#if !defined(HAVE_DECL_STRNLEN) || !HAVE_DECL_STRNLEN 81 82/* If strnlen is not declared, provide our own version. */ 83 84static size_t 85xstrnlen (const char *s, size_t maxlen) 86{ 87 size_t i; 88 89 for (i = 0; i < maxlen; ++i) 90 if (s[i] == '\0') 91 break; 92 return i; 93} 94 95#define strnlen xstrnlen 96 97#endif 98 99#ifndef HAVE_LSTAT 100 101/* Dummy version of lstat for systems that don't have it. */ 102 103static int 104xlstat (const char *path ATTRIBUTE_UNUSED, struct stat *st ATTRIBUTE_UNUSED) 105{ 106 return -1; 107} 108 109#define lstat xlstat 110 111#endif 112 113#ifndef HAVE_READLINK 114 115/* Dummy version of readlink for systems that don't have it. */ 116 117static ssize_t 118xreadlink (const char *path ATTRIBUTE_UNUSED, char *buf ATTRIBUTE_UNUSED, 119 size_t bufsz ATTRIBUTE_UNUSED) 120{ 121 return -1; 122} 123 124#define readlink xreadlink 125 126#endif 127 128#ifndef HAVE_DL_ITERATE_PHDR 129 130/* Dummy version of dl_iterate_phdr for systems that don't have it. 
*/

/* Rename our dummies so a real dl_iterate_phdr in a system header can
   never collide with them.  */
#define dl_phdr_info x_dl_phdr_info
#define dl_iterate_phdr x_dl_iterate_phdr

struct dl_phdr_info
{
  uintptr_t dlpi_addr;
  const char *dlpi_name;
};

/* Dummy iterator: reports no loaded modules.  */
static int
dl_iterate_phdr (int (*callback) (struct dl_phdr_info *,
                                  size_t, void *) ATTRIBUTE_UNUSED,
                 void *data ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* ! defined (HAVE_DL_ITERATE_PHDR) */

/* The configure script must tell us whether we are 32-bit or 64-bit
   ELF.  We could make this code test and support either possibility,
   but there is no point.  This code only works for the currently
   running executable, which means that we know the ELF mode at
   configure time.  */

#if BACKTRACE_ELF_SIZE != 32 && BACKTRACE_ELF_SIZE != 64
#error "Unknown BACKTRACE_ELF_SIZE"
#endif

/* <link.h> might #include <elf.h> which might define our constants
   with slightly different values.  Undefine them to be safe.  */

#undef EI_NIDENT
#undef EI_MAG0
#undef EI_MAG1
#undef EI_MAG2
#undef EI_MAG3
#undef EI_CLASS
#undef EI_DATA
#undef EI_VERSION
#undef ELF_MAG0
#undef ELF_MAG1
#undef ELF_MAG2
#undef ELF_MAG3
#undef ELFCLASS32
#undef ELFCLASS64
#undef ELFDATA2LSB
#undef ELFDATA2MSB
#undef EV_CURRENT
#undef ET_DYN
#undef EM_PPC64
#undef EF_PPC64_ABI
#undef SHN_LORESERVE
#undef SHN_XINDEX
#undef SHN_UNDEF
#undef SHT_PROGBITS
#undef SHT_SYMTAB
#undef SHT_STRTAB
#undef SHT_DYNSYM
#undef SHF_COMPRESSED
#undef STT_OBJECT
#undef STT_FUNC
#undef NT_GNU_BUILD_ID
#undef ELFCOMPRESS_ZLIB
#undef ELFCOMPRESS_ZSTD

/* Basic types.  */

typedef uint16_t b_elf_half;    /* Elf_Half.  */
typedef uint32_t b_elf_word;    /* Elf_Word.  */
typedef int32_t b_elf_sword;    /* Elf_Sword.  */

#if BACKTRACE_ELF_SIZE == 32

typedef uint32_t b_elf_addr;    /* Elf_Addr.  */
typedef uint32_t b_elf_off;     /* Elf_Off.  */

typedef uint32_t b_elf_wxword;  /* 32-bit Elf_Word, 64-bit ELF_Xword.  */

#else

typedef uint64_t b_elf_addr;    /* Elf_Addr.  */
typedef uint64_t b_elf_off;     /* Elf_Off.  */
typedef uint64_t b_elf_xword;   /* Elf_Xword.  */
typedef int64_t b_elf_sxword;   /* Elf_Sxword.  */

typedef uint64_t b_elf_wxword;  /* 32-bit Elf_Word, 64-bit ELF_Xword.  */

#endif

/* Data structures and associated constants.  */

#define EI_NIDENT 16

/* ELF file header; field widths follow BACKTRACE_ELF_SIZE.  */
typedef struct {
  unsigned char	e_ident[EI_NIDENT];	/* ELF "magic number" */
  b_elf_half	e_type;			/* Identifies object file type */
  b_elf_half	e_machine;		/* Specifies required architecture */
  b_elf_word	e_version;		/* Identifies object file version */
  b_elf_addr	e_entry;		/* Entry point virtual address */
  b_elf_off	e_phoff;		/* Program header table file offset */
  b_elf_off	e_shoff;		/* Section header table file offset */
  b_elf_word	e_flags;		/* Processor-specific flags */
  b_elf_half	e_ehsize;		/* ELF header size in bytes */
  b_elf_half	e_phentsize;		/* Program header table entry size */
  b_elf_half	e_phnum;		/* Program header table entry count */
  b_elf_half	e_shentsize;		/* Section header table entry size */
  b_elf_half	e_shnum;		/* Section header table entry count */
  b_elf_half	e_shstrndx;		/* Section header string table index */
} b_elf_ehdr;  /* Elf_Ehdr.  */

/* Indices into e_ident.  */
#define EI_MAG0 0
#define EI_MAG1 1
#define EI_MAG2 2
#define EI_MAG3 3
#define EI_CLASS 4
#define EI_DATA 5
#define EI_VERSION 6

/* The "\177ELF" magic bytes.  */
#define ELFMAG0 0x7f
#define ELFMAG1 'E'
#define ELFMAG2 'L'
#define ELFMAG3 'F'

#define ELFCLASS32 1
#define ELFCLASS64 2

#define ELFDATA2LSB 1
#define ELFDATA2MSB 2

#define EV_CURRENT 1

#define ET_DYN 3

#define EM_PPC64 21
#define EF_PPC64_ABI 3

/* Section header.  */
typedef struct {
  b_elf_word	sh_name;		/* Section name, index in string tbl */
  b_elf_word	sh_type;		/* Type of section */
  b_elf_wxword	sh_flags;		/* Miscellaneous section attributes */
  b_elf_addr	sh_addr;		/* Section virtual addr at execution */
  b_elf_off	sh_offset;		/* Section file offset */
  b_elf_wxword	sh_size;		/* Size of section in bytes */
  b_elf_word	sh_link;		/* Index of another section */
  b_elf_word	sh_info;		/* Additional section information */
  b_elf_wxword	sh_addralign;		/* Section alignment */
  b_elf_wxword	sh_entsize;		/* Entry size if section holds table */
} b_elf_shdr;  /* Elf_Shdr.  */

#define SHN_UNDEF	0x0000		/* Undefined section */
#define SHN_LORESERVE	0xFF00		/* Begin range of reserved indices */
#define SHN_XINDEX	0xFFFF		/* Section index is held elsewhere */

#define SHT_PROGBITS 1
#define SHT_SYMTAB 2
#define SHT_STRTAB 3
#define SHT_DYNSYM 11

#define SHF_COMPRESSED 0x800

#if BACKTRACE_ELF_SIZE == 32

/* Symbol table entry, 32-bit layout.  */
typedef struct
{
  b_elf_word	st_name;		/* Symbol name, index in string tbl */
  b_elf_addr	st_value;		/* Symbol value */
  b_elf_word	st_size;		/* Symbol size */
  unsigned char	st_info;		/* Symbol binding and type */
  unsigned char	st_other;		/* Visibility and other data */
  b_elf_half	st_shndx;		/* Symbol section index */
} b_elf_sym;  /* Elf_Sym.
*/

#else /* BACKTRACE_ELF_SIZE != 32 */

/* Symbol table entry, 64-bit layout (fields ordered differently from
   the 32-bit one, per the ELF64 gABI).  */
typedef struct
{
  b_elf_word	st_name;		/* Symbol name, index in string tbl */
  unsigned char	st_info;		/* Symbol binding and type */
  unsigned char	st_other;		/* Visibility and other data */
  b_elf_half	st_shndx;		/* Symbol section index */
  b_elf_addr	st_value;		/* Symbol value */
  b_elf_xword	st_size;		/* Symbol size */
} b_elf_sym;  /* Elf_Sym.  */

#endif /* BACKTRACE_ELF_SIZE != 32 */

#define STT_OBJECT 1
#define STT_FUNC 2

/* ELF note header; name (and then the descriptor) follow inline.  */
typedef struct
{
  uint32_t namesz;
  uint32_t descsz;
  uint32_t type;
  char name[1];
} b_elf_note;

#define NT_GNU_BUILD_ID 3

#if BACKTRACE_ELF_SIZE == 32

/* Compression header at the start of an SHF_COMPRESSED section.  */
typedef struct
{
  b_elf_word	ch_type;		/* Compression algorithm */
  b_elf_word	ch_size;		/* Uncompressed size */
  b_elf_word	ch_addralign;		/* Alignment for uncompressed data */
} b_elf_chdr;  /* Elf_Chdr */

#else /* BACKTRACE_ELF_SIZE != 32 */

/* Compression header at the start of an SHF_COMPRESSED section.  */
typedef struct
{
  b_elf_word	ch_type;		/* Compression algorithm */
  b_elf_word	ch_reserved;		/* Reserved */
  b_elf_xword	ch_size;		/* Uncompressed size */
  b_elf_xword	ch_addralign;		/* Alignment for uncompressed data */
} b_elf_chdr;  /* Elf_Chdr */

#endif /* BACKTRACE_ELF_SIZE != 32 */

#define ELFCOMPRESS_ZLIB 1
#define ELFCOMPRESS_ZSTD 2

/* Names of sections, indexed by enum dwarf_section in internal.h.  */

static const char * const dwarf_section_names[DEBUG_MAX] =
{
  ".debug_info",
  ".debug_line",
  ".debug_abbrev",
  ".debug_ranges",
  ".debug_str",
  ".debug_addr",
  ".debug_str_offsets",
  ".debug_line_str",
  ".debug_rnglists"
};

/* Information we gather for the sections we care about.  */

struct debug_section_info
{
  /* Section file offset.  */
  off_t offset;
  /* Section size.  */
  size_t size;
  /* Section contents, after read from file.  */
  const unsigned char *data;
  /* Whether the SHF_COMPRESSED flag is set for the section.  */
  int compressed;
};

/* Information we keep for an ELF symbol.  */

struct elf_symbol
{
  /* The name of the symbol.  */
  const char *name;
  /* The address of the symbol.  */
  uintptr_t address;
  /* The size of the symbol.  */
  size_t size;
};

/* Information to pass to elf_syminfo.  */

struct elf_syminfo_data
{
  /* Symbols for the next module.  */
  struct elf_syminfo_data *next;
  /* The ELF symbols, sorted by address.  */
  struct elf_symbol *symbols;
  /* The number of symbols.  */
  size_t count;
};

/* A view that works for either a file or memory.  */

struct elf_view
{
  struct backtrace_view view;
  int release;	/* If non-zero, must call backtrace_release_view.  */
};

/* Information about PowerPC64 ELFv1 .opd section.  */

struct elf_ppc64_opd_data
{
  /* Address of the .opd section.  */
  b_elf_addr addr;
  /* Section data.  */
  const char *data;
  /* Size of the .opd section.  */
  size_t size;
  /* Corresponding section view.  */
  struct elf_view view;
};

/* Create a view of SIZE bytes from DESCRIPTOR/MEMORY at OFFSET.
*/ 432 433static int 434elf_get_view (struct backtrace_state *state, int descriptor, 435 const unsigned char *memory, size_t memory_size, off_t offset, 436 uint64_t size, backtrace_error_callback error_callback, 437 void *data, struct elf_view *view) 438{ 439 if (memory == NULL) 440 { 441 view->release = 1; 442 return backtrace_get_view (state, descriptor, offset, size, 443 error_callback, data, &view->view); 444 } 445 else 446 { 447 if ((uint64_t) offset + size > (uint64_t) memory_size) 448 { 449 error_callback (data, "out of range for in-memory file", 0); 450 return 0; 451 } 452 view->view.data = (const void *) (memory + offset); 453 view->view.base = NULL; 454 view->view.len = size; 455 view->release = 0; 456 return 1; 457 } 458} 459 460/* Release a view read by elf_get_view. */ 461 462static void 463elf_release_view (struct backtrace_state *state, struct elf_view *view, 464 backtrace_error_callback error_callback, void *data) 465{ 466 if (view->release) 467 backtrace_release_view (state, &view->view, error_callback, data); 468} 469 470/* Compute the CRC-32 of BUF/LEN. This uses the CRC used for 471 .gnu_debuglink files. 
*/

static uint32_t
elf_crc32 (uint32_t crc, const unsigned char *buf, size_t len)
{
  /* Table of CRCs of all single-byte messages for the standard
     reflected polynomial 0xedb88320 (the same CRC-32 used by gzip).  */
  static const uint32_t crc32_table[256] =
  {
    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
    0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
    0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
    0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
    0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
    0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
    0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
    0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
    0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
    0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
    0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
    0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
    0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
    0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
    0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
    0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
    0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
    0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
    0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
    0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
    0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
    0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
    0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
    0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
    0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
    0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
    0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
    0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
    0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
    0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
    0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
    0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
    0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
    0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
    0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
    0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
    0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
    0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
    0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
    0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
    0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
    0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
    0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
    0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
    0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
    0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
    0x2d02ef8d
  };
  size_t i;

  /* Standard byte-at-a-time table algorithm: the running CRC is kept
     inverted, each input byte is folded into the low eight bits, and a
     table lookup advances the remainder.  Chaining calls works: pass the
     previous result as CRC to continue a checksum.  */
  crc = ~crc;
  for (i = 0; i < len; ++i)
    crc = crc32_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
  return ~crc;
}

/* Return the CRC-32 of the entire file open at DESCRIPTOR.
*/

static uint32_t
elf_crc32_file (struct backtrace_state *state, int descriptor,
		backtrace_error_callback error_callback, void *data)
{
  struct stat st;
  struct backtrace_view file_view;
  uint32_t ret;

  if (fstat (descriptor, &st) < 0)
    {
      error_callback (data, "fstat", errno);
      return 0;
    }

  /* Map the whole file and checksum it in a single pass.  */
  if (!backtrace_get_view (state, descriptor, 0, st.st_size, error_callback,
			   data, &file_view))
    return 0;

  ret = elf_crc32 (0, (const unsigned char *) file_view.data, st.st_size);

  backtrace_release_view (state, &file_view, error_callback, data);

  return ret;
}

/* A dummy callback function used when we can't find a symbol
   table.  */

static void
elf_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED,
	    uintptr_t addr ATTRIBUTE_UNUSED,
	    backtrace_syminfo_callback callback ATTRIBUTE_UNUSED,
	    backtrace_error_callback error_callback, void *data)
{
  error_callback (data, "no symbol table in ELF executable", -1);
}

/* A callback function used when we can't find any debug info.  */

static int
elf_nodebug (struct backtrace_state *state, uintptr_t pc,
	     backtrace_full_callback callback,
	     backtrace_error_callback error_callback, void *data)
{
  if (state->syminfo_fn != NULL && state->syminfo_fn != elf_nosyms)
    {
      struct backtrace_call_full bdata;

      /* Fetch symbol information so that we can at least get the
	 function name.  */

      bdata.full_callback = callback;
      bdata.full_error_callback = error_callback;
      bdata.full_data = data;
      bdata.ret = 0;
      state->syminfo_fn (state, pc, backtrace_syminfo_to_full_callback,
			 backtrace_syminfo_to_full_error_callback, &bdata);
      return bdata.ret;
    }

  error_callback (data, "no debug info in ELF executable", -1);
  return 0;
}

/* Compare struct elf_symbol for qsort.
*/

static int
elf_symbol_compare (const void *v1, const void *v2)
{
  const struct elf_symbol *e1 = (const struct elf_symbol *) v1;
  const struct elf_symbol *e2 = (const struct elf_symbol *) v2;

  if (e1->address < e2->address)
    return -1;
  else if (e1->address > e2->address)
    return 1;
  else
    return 0;
}

/* Compare an ADDR against an elf_symbol for bsearch.  We allocate one
   extra entry in the array so that this can look safely at the next
   entry.  */

static int
elf_symbol_search (const void *vkey, const void *ventry)
{
  const uintptr_t *key = (const uintptr_t *) vkey;
  const struct elf_symbol *entry = (const struct elf_symbol *) ventry;
  uintptr_t addr;

  /* A symbol matches when ADDR lies in [address, address + size).  */
  addr = *key;
  if (addr < entry->address)
    return -1;
  else if (addr >= entry->address + entry->size)
    return 1;
  else
    return 0;
}

/* Initialize the symbol table info for elf_syminfo.  Builds SDATA's
   sorted elf_symbol array from the raw symtab/strtab bytes, returning 1
   on success and 0 on failure (reported via ERROR_CALLBACK).  */

static int
elf_initialize_syminfo (struct backtrace_state *state,
			uintptr_t base_address,
			const unsigned char *symtab_data, size_t symtab_size,
			const unsigned char *strtab, size_t strtab_size,
			backtrace_error_callback error_callback,
			void *data, struct elf_syminfo_data *sdata,
			struct elf_ppc64_opd_data *opd)
{
  size_t sym_count;
  const b_elf_sym *sym;
  size_t elf_symbol_count;
  size_t elf_symbol_size;
  struct elf_symbol *elf_symbols;
  size_t i;
  unsigned int j;

  sym_count = symtab_size / sizeof (b_elf_sym);

  /* We only care about function symbols.  Count them.  */
  sym = (const b_elf_sym *) symtab_data;
  elf_symbol_count = 0;
  for (i = 0; i < sym_count; ++i, ++sym)
    {
      int info;

      /* The low four bits of st_info hold the symbol type.  */
      info = sym->st_info & 0xf;
      if ((info == STT_FUNC || info == STT_OBJECT)
	  && sym->st_shndx != SHN_UNDEF)
	++elf_symbol_count;
    }

  elf_symbol_size = elf_symbol_count * sizeof (struct elf_symbol);
  elf_symbols = ((struct elf_symbol *)
		 backtrace_alloc (state, elf_symbol_size, error_callback,
				  data));
  if (elf_symbols == NULL)
    return 0;

  /* Second pass: fill in the entries counted above.  */
  sym = (const b_elf_sym *) symtab_data;
  j = 0;
  for (i = 0; i < sym_count; ++i, ++sym)
    {
      int info;

      info = sym->st_info & 0xf;
      if (info != STT_FUNC && info != STT_OBJECT)
	continue;
      if (sym->st_shndx == SHN_UNDEF)
	continue;
      if (sym->st_name >= strtab_size)
	{
	  error_callback (data, "symbol string index out of range", 0);
	  backtrace_free (state, elf_symbols, elf_symbol_size, error_callback,
			  data);
	  return 0;
	}
      elf_symbols[j].name = (const char *) strtab + sym->st_name;
      /* Special case PowerPC64 ELFv1 symbols in .opd section, if the symbol
	 is a function descriptor, read the actual code address from the
	 descriptor.  */
      if (opd
	  && sym->st_value >= opd->addr
	  && sym->st_value < opd->addr + opd->size)
	elf_symbols[j].address
	  = *(const b_elf_addr *) (opd->data + (sym->st_value - opd->addr));
      else
	elf_symbols[j].address = sym->st_value;
      /* Relocate to where the module is actually loaded.  */
      elf_symbols[j].address += base_address;
      elf_symbols[j].size = sym->st_size;
      ++j;
    }

  backtrace_qsort (elf_symbols, elf_symbol_count, sizeof (struct elf_symbol),
		   elf_symbol_compare);

  sdata->next = NULL;
  sdata->symbols = elf_symbols;
  sdata->count = elf_symbol_count;

  return 1;
}

/* Add EDATA to the list in STATE.
*/

static void
elf_add_syminfo_data (struct backtrace_state *state,
		      struct elf_syminfo_data *edata)
{
  if (!state->threaded)
    {
      struct elf_syminfo_data **pp;

      /* Single-threaded: walk to the end of the list and append.  */
      for (pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;
	   *pp != NULL;
	   pp = &(*pp)->next)
	;
      *pp = edata;
    }
  else
    {
      /* Threaded: append with an atomic compare-and-swap on the tail
	 pointer, restarting from the head whenever another thread wins
	 the race.  */
      while (1)
	{
	  struct elf_syminfo_data **pp;

	  pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;

	  while (1)
	    {
	      struct elf_syminfo_data *p;

	      p = backtrace_atomic_load_pointer (pp);

	      if (p == NULL)
		break;

	      pp = &p->next;
	    }

	  if (__sync_bool_compare_and_swap (pp, NULL, edata))
	    break;
	}
    }
}

/* Return the symbol name and value for an ADDR.  */

static void
elf_syminfo (struct backtrace_state *state, uintptr_t addr,
	     backtrace_syminfo_callback callback,
	     backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
	     void *data)
{
  struct elf_syminfo_data *edata;
  struct elf_symbol *sym = NULL;

  if (!state->threaded)
    {
      /* Search each module's sorted symbol array in turn.  */
      for (edata = (struct elf_syminfo_data *) state->syminfo_data;
	   edata != NULL;
	   edata = edata->next)
	{
	  sym = ((struct elf_symbol *)
		 bsearch (&addr, edata->symbols, edata->count,
			  sizeof (struct elf_symbol), elf_symbol_search));
	  if (sym != NULL)
	    break;
	}
    }
  else
    {
      struct elf_syminfo_data **pp;

      /* Same search, but load each next pointer atomically since the
	 list may be growing concurrently.  */
      pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;
      while (1)
	{
	  edata = backtrace_atomic_load_pointer (pp);
	  if (edata == NULL)
	    break;

	  sym = ((struct elf_symbol *)
		 bsearch (&addr, edata->symbols, edata->count,
			  sizeof (struct elf_symbol), elf_symbol_search));
	  if (sym != NULL)
	    break;

	  pp = &edata->next;
	}
    }

  if (sym == NULL)
    callback (data, addr, NULL, 0, 0);
  else
    callback (data, addr, sym->name, sym->address, sym->size);
}

/* Return whether
FILENAME is a symlink. */ 820 821static int 822elf_is_symlink (const char *filename) 823{ 824 struct stat st; 825 826 if (lstat (filename, &st) < 0) 827 return 0; 828 return S_ISLNK (st.st_mode); 829} 830 831/* Return the results of reading the symlink FILENAME in a buffer 832 allocated by backtrace_alloc. Return the length of the buffer in 833 *LEN. */ 834 835static char * 836elf_readlink (struct backtrace_state *state, const char *filename, 837 backtrace_error_callback error_callback, void *data, 838 size_t *plen) 839{ 840 size_t len; 841 char *buf; 842 843 len = 128; 844 while (1) 845 { 846 ssize_t rl; 847 848 buf = (char*)backtrace_alloc (state, len, error_callback, data); 849 if (buf == NULL) 850 return NULL; 851 rl = readlink (filename, buf, len); 852 if (rl < 0) 853 { 854 backtrace_free (state, buf, len, error_callback, data); 855 return NULL; 856 } 857 if ((size_t) rl < len - 1) 858 { 859 buf[rl] = '\0'; 860 *plen = len; 861 return buf; 862 } 863 backtrace_free (state, buf, len, error_callback, data); 864 len *= 2; 865 } 866} 867 868#define SYSTEM_BUILD_ID_DIR "/usr/lib/debug/.build-id/" 869 870/* Open a separate debug info file, using the build ID to find it. 871 Returns an open file descriptor, or -1. 872 873 The GDB manual says that the only place gdb looks for a debug file 874 when the build ID is known is in /usr/lib/debug/.build-id. 
*/

static int
elf_open_debugfile_by_buildid (struct backtrace_state *state,
			       const char *buildid_data, size_t buildid_size,
			       const char *filename,
			       backtrace_error_callback error_callback,
			       void *data)
{
  const char * const prefix = SYSTEM_BUILD_ID_DIR;
  const size_t prefix_len = strlen (prefix);
  const char * const suffix = ".debug";
  const size_t suffix_len = strlen (suffix);
  size_t len;
  char *bd_filename;
  char *t;
  size_t i;
  int ret;
  int does_not_exist;

  /* Room for the prefix, two hex digits per build ID byte, the '/'
     inserted after the first byte, the suffix, and the NUL.  */
  len = prefix_len + buildid_size * 2 + suffix_len + 2;
  bd_filename = (char*)backtrace_alloc (state, len, error_callback, data);
  if (bd_filename == NULL)
    return -1;

  /* Build ".build-id/ab/cdef....debug": the first build ID byte names a
     subdirectory, the rest form the file name.  */
  t = bd_filename;
  memcpy (t, prefix, prefix_len);
  t += prefix_len;
  for (i = 0; i < buildid_size; i++)
    {
      unsigned char b;
      unsigned char nib;

      b = (unsigned char) buildid_data[i];
      nib = (b & 0xf0) >> 4;
      *t++ = nib < 10 ? '0' + nib : 'a' + nib - 10;
      nib = b & 0x0f;
      *t++ = nib < 10 ? '0' + nib : 'a' + nib - 10;
      if (i == 0)
	*t++ = '/';
    }
  memcpy (t, suffix, suffix_len);
  t[suffix_len] = '\0';

  ret = backtrace_open (bd_filename, error_callback, data, &does_not_exist);

  backtrace_free (state, bd_filename, len, error_callback, data);

  /* gdb checks that the debuginfo file has the same build ID note.
     That seems kind of pointless to me--why would it have the right
     name but not the right build ID?--so skipping the check.  */

#ifdef TRACY_DEBUGINFOD
  /* Not found locally: fall back to a debuginfod lookup.  */
  if (ret == -1)
    return GetDebugInfoDescriptor( buildid_data, buildid_size, filename );
  else
    return ret;
#else
  return ret;
#endif
}

/* Try to open a file whose name is PREFIX (length PREFIX_LEN)
   concatenated with PREFIX2 (length PREFIX2_LEN) concatenated with
   DEBUGLINK_NAME.  Returns an open file descriptor, or -1.
*/

static int
elf_try_debugfile (struct backtrace_state *state, const char *prefix,
		   size_t prefix_len, const char *prefix2, size_t prefix2_len,
		   const char *debuglink_name,
		   backtrace_error_callback error_callback, void *data)
{
  size_t debuglink_len;
  size_t try_len;
  char *Try;
  int does_not_exist;
  int ret;

  debuglink_len = strlen (debuglink_name);
  try_len = prefix_len + prefix2_len + debuglink_len + 1;
  Try = (char*)backtrace_alloc (state, try_len, error_callback, data);
  if (Try == NULL)
    return -1;

  memcpy (Try, prefix, prefix_len);
  memcpy (Try + prefix_len, prefix2, prefix2_len);
  memcpy (Try + prefix_len + prefix2_len, debuglink_name, debuglink_len);
  Try[prefix_len + prefix2_len + debuglink_len] = '\0';

  ret = backtrace_open (Try, error_callback, data, &does_not_exist);

  backtrace_free (state, Try, try_len, error_callback, data);

  return ret;
}

/* Find a separate debug info file, using the debuglink section data
   to find it.  Returns an open file descriptor, or -1.  */

static int
elf_find_debugfile_by_debuglink (struct backtrace_state *state,
				 const char *filename,
				 const char *debuglink_name,
				 backtrace_error_callback error_callback,
				 void *data)
{
  int ret;
  char *alc;
  size_t alc_len;
  const char *slash;
  int ddescriptor;
  const char *prefix;
  size_t prefix_len;

  /* Resolve symlinks in FILENAME.  Since FILENAME is fairly likely to
     be /proc/self/exe, symlinks are common.  We don't try to resolve
     the whole path name, just the base name.  */
  ret = -1;
  alc = NULL;
  alc_len = 0;
  while (elf_is_symlink (filename))
    {
      char *new_buf;
      size_t new_len;

      new_buf = elf_readlink (state, filename, error_callback, data, &new_len);
      if (new_buf == NULL)
	break;

      if (new_buf[0] == '/')
	filename = new_buf;
      else
	{
	  slash = strrchr (filename, '/');
	  if (slash == NULL)
	    filename = new_buf;
	  else
	    {
	      size_t clen;
	      char *c;

	      /* Relative link target: splice it onto the directory
		 portion of the current FILENAME.  */
	      slash++;
	      clen = slash - filename + strlen (new_buf) + 1;
	      c = (char*)backtrace_alloc (state, clen, error_callback, data);
	      if (c == NULL)
		goto done;

	      memcpy (c, filename, slash - filename);
	      memcpy (c + (slash - filename), new_buf, strlen (new_buf));
	      c[slash - filename + strlen (new_buf)] = '\0';
	      backtrace_free (state, new_buf, new_len, error_callback, data);
	      filename = c;
	      new_buf = c;
	      new_len = clen;
	    }
	}

      /* Free the previous iteration's buffer now that FILENAME points
	 into the newly built one.  */
      if (alc != NULL)
	backtrace_free (state, alc, alc_len, error_callback, data);
      alc = new_buf;
      alc_len = new_len;
    }

  /* Look for DEBUGLINK_NAME in the same directory as FILENAME.  */

  slash = strrchr (filename, '/');
  if (slash == NULL)
    {
      prefix = "";
      prefix_len = 0;
    }
  else
    {
      slash++;
      prefix = filename;
      prefix_len = slash - filename;
    }

  ddescriptor = elf_try_debugfile (state, prefix, prefix_len, "", 0,
				   debuglink_name, error_callback, data);
  if (ddescriptor >= 0)
    {
      ret = ddescriptor;
      goto done;
    }

  /* Look for DEBUGLINK_NAME in a .debug subdirectory of FILENAME.  */

  ddescriptor = elf_try_debugfile (state, prefix, prefix_len, ".debug/",
				   strlen (".debug/"), debuglink_name,
				   error_callback, data);
  if (ddescriptor >= 0)
    {
      ret = ddescriptor;
      goto done;
    }

  /* Look for DEBUGLINK_NAME in /usr/lib/debug.  */

  ddescriptor = elf_try_debugfile (state, "/usr/lib/debug/",
				   strlen ("/usr/lib/debug/"), prefix,
				   prefix_len, debuglink_name,
				   error_callback, data);
  if (ddescriptor >= 0)
    ret = ddescriptor;

 done:
  if (alc != NULL && alc_len > 0)
    backtrace_free (state, alc, alc_len, error_callback, data);
  return ret;
}

/* Open a separate debug info file, using the debuglink section data
   to find it.  Returns an open file descriptor, or -1.  */

static int
elf_open_debugfile_by_debuglink (struct backtrace_state *state,
				 const char *filename,
				 const char *debuglink_name,
				 uint32_t debuglink_crc,
				 backtrace_error_callback error_callback,
				 void *data)
{
  int ddescriptor;

  ddescriptor = elf_find_debugfile_by_debuglink (state, filename,
						 debuglink_name,
						 error_callback, data);
  if (ddescriptor < 0)
    return -1;

  /* A non-zero CRC from the .gnu_debuglink section must match the file
     we found, or it is a stale debug file.  */
  if (debuglink_crc != 0)
    {
      uint32_t got_crc;

      got_crc = elf_crc32_file (state, ddescriptor, error_callback, data);
      if (got_crc != debuglink_crc)
	{
	  backtrace_close (ddescriptor, error_callback, data);
	  return -1;
	}
    }

  return ddescriptor;
}

/* A function useful for setting a breakpoint for an inflation failure
   when this code is compiled with -g.  */

static void
elf_uncompress_failed(void)
{
}

/* *PVAL is the current value being read from the stream, and *PBITS
   is the number of valid bits.  Ensure that *PVAL holds at least 15
   bits by reading additional bits from *PPIN, up to PINEND, as
   needed.  Updates *PPIN, *PVAL and *PBITS.  Returns 1 on success, 0
   on error.
*/ 1133 1134static int 1135elf_fetch_bits (const unsigned char **ppin, const unsigned char *pinend, 1136 uint64_t *pval, unsigned int *pbits) 1137{ 1138 unsigned int bits; 1139 const unsigned char *pin; 1140 uint64_t val; 1141 uint32_t next; 1142 1143 bits = *pbits; 1144 if (bits >= 15) 1145 return 1; 1146 pin = *ppin; 1147 val = *pval; 1148 1149 if (unlikely (pinend - pin < 4)) 1150 { 1151 elf_uncompress_failed (); 1152 return 0; 1153 } 1154 1155#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \ 1156 && defined(__ORDER_BIG_ENDIAN__) \ 1157 && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \ 1158 || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) 1159 /* We've ensured that PIN is aligned. */ 1160 next = *(const uint32_t *)pin; 1161 1162#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 1163 next = __builtin_bswap32 (next); 1164#endif 1165#else 1166 next = pin[0] | (pin[1] << 8) | (pin[2] << 16) | (pin[3] << 24); 1167#endif 1168 1169 val |= (uint64_t)next << bits; 1170 bits += 32; 1171 pin += 4; 1172 1173 /* We will need the next four bytes soon. */ 1174 __builtin_prefetch (pin, 0, 0); 1175 1176 *ppin = pin; 1177 *pval = val; 1178 *pbits = bits; 1179 return 1; 1180} 1181 1182/* This is like elf_fetch_bits, but it fetchs the bits backward, and ensures at 1183 least 16 bits. This is for zstd. 
 */

static int
elf_fetch_bits_backward (const unsigned char **ppin,
			 const unsigned char *pinend,
			 uint64_t *pval, unsigned int *pbits)
{
  unsigned int bits;
  const unsigned char *pin;
  uint64_t val;
  uint32_t next;

  bits = *pbits;
  if (bits >= 16)
    return 1;
  pin = *ppin;
  val = *pval;

  /* Reading proceeds from high addresses down toward PINEND, which
     marks the start of the stream.  Reaching it is only an error when
     no buffered bits remain.  */
  if (unlikely (pin <= pinend))
    {
      if (bits == 0)
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      return 1;
    }

  pin -= 4;

#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
    && defined(__ORDER_BIG_ENDIAN__) \
    && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
	|| __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
  /* We've ensured that PIN is aligned.  */
  next = *(const uint32_t *)pin;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  next = __builtin_bswap32 (next);
#endif
#else
  next = pin[0] | (pin[1] << 8) | (pin[2] << 16) | (pin[3] << 24);
#endif

  /* When fetching backward, new bits go below the buffered ones.  */
  val <<= 32;
  val |= next;
  bits += 32;

  /* If the aligned 4-byte read started before PINEND, discard the
     bytes that lie outside the stream.  */
  if (unlikely (pin < pinend))
    {
      val >>= (pinend - pin) * 8;
      bits -= (pinend - pin) * 8;
    }

  *ppin = pin;
  *pval = val;
  *pbits = bits;
  return 1;
}

/* Initialize backward fetching when the bitstream starts with a 1 bit in the
   last byte in memory (which is the first one that we read).  This is used by
   zstd decompression.  Returns 1 on success, 0 on error.  */

static int
elf_fetch_backward_init (const unsigned char **ppin,
			 const unsigned char *pinend,
			 uint64_t *pval, unsigned int *pbits)
{
  const unsigned char *pin;
  unsigned int stream_start;
  uint64_t val;
  unsigned int bits;

  pin = *ppin;
  /* The byte at the highest address is read first; it must be nonzero
     because its topmost 1 bit marks where the bitstream begins.  */
  stream_start = (unsigned int)*pin;
  if (unlikely (stream_start == 0))
    {
      elf_uncompress_failed ();
      return 0;
    }
  val = 0;
  bits = 0;

  /* Align to a 32-bit boundary.
 */
  /* Consume bytes one at a time until PIN is 32-bit aligned, so that
     elf_fetch_bits_backward can use aligned 4-byte loads.  */
  while ((((uintptr_t)pin) & 3) != 0)
    {
      val <<= 8;
      val |= (uint64_t)*pin;
      bits += 8;
      --pin;
    }

  val <<= 8;
  val |= (uint64_t)*pin;
  bits += 8;

  *ppin = pin;
  *pval = val;
  *pbits = bits;
  if (!elf_fetch_bits_backward (ppin, pinend, pval, pbits))
    return 0;

  /* Discard the padding zero bits and the initial 1 bit that marks the
     start of the stream.  __builtin_clz counts leading zeros over the
     full unsigned int, so subtract the bits above the low byte.  */
  *pbits -= __builtin_clz (stream_start) - (sizeof (unsigned int) - 1) * 8 + 1;

  if (!elf_fetch_bits_backward (ppin, pinend, pval, pbits))
    return 0;

  return 1;
}

/* Huffman code tables, like the rest of the zlib format, are defined
   by RFC 1951.  We store a Huffman code table as a series of tables
   stored sequentially in memory.  Each entry in a table is 16 bits.
   The first, main, table has 256 entries.  It is followed by a set of
   secondary tables of length 2 to 128 entries.  The maximum length of
   a code sequence in the deflate format is 15 bits, so that is all we
   need.  Each secondary table has an index, which is the offset of
   the table in the overall memory storage.

   The deflate format says that all codes of a given bit length are
   lexicographically consecutive.  Perhaps we could have 130 values
   that require a 15-bit code, perhaps requiring three secondary
   tables of size 128.  I don't know if this is actually possible, but
   it suggests that the maximum size required for secondary tables is
   3 * 128 + 3 * 64 ... == 768.  The zlib enough program reports 660
   as the maximum.  We permit 768, since in addition to the 256 for
   the primary table, with two bytes per entry, and with the two
   tables we need, that gives us a page.

   A single table entry needs to store a value or (for the main table
   only) the index and size of a secondary table.  Values range from 0
   to 285, inclusive.  Secondary table indexes, per above, range from
   0 to 510.
For a value we need to store the number of bits we need 1317 to determine that value (one value may appear multiple times in the 1318 table), which is 1 to 8. For a secondary table we need to store 1319 the number of bits used to index into the table, which is 1 to 7. 1320 And of course we need 1 bit to decide whether we have a value or a 1321 secondary table index. So each entry needs 9 bits for value/table 1322 index, 3 bits for size, 1 bit what it is. For simplicity we use 16 1323 bits per entry. */ 1324 1325/* Number of entries we allocate to for one code table. We get a page 1326 for the two code tables we need. */ 1327 1328#define ZLIB_HUFFMAN_TABLE_SIZE (1024) 1329 1330/* Bit masks and shifts for the values in the table. */ 1331 1332#define ZLIB_HUFFMAN_VALUE_MASK 0x01ff 1333#define ZLIB_HUFFMAN_BITS_SHIFT 9 1334#define ZLIB_HUFFMAN_BITS_MASK 0x7 1335#define ZLIB_HUFFMAN_SECONDARY_SHIFT 12 1336 1337/* For working memory while inflating we need two code tables, we need 1338 an array of code lengths (max value 15, so we use unsigned char), 1339 and an array of unsigned shorts used while building a table. The 1340 latter two arrays must be large enough to hold the maximum number 1341 of code lengths, which RFC 1951 defines as 286 + 30. */ 1342 1343#define ZLIB_TABLE_SIZE \ 1344 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \ 1345 + (286 + 30) * sizeof (uint16_t) \ 1346 + (286 + 30) * sizeof (unsigned char)) 1347 1348#define ZLIB_TABLE_CODELEN_OFFSET \ 1349 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \ 1350 + (286 + 30) * sizeof (uint16_t)) 1351 1352#define ZLIB_TABLE_WORK_OFFSET \ 1353 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t)) 1354 1355#ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE 1356 1357/* Used by the main function that generates the fixed table to learn 1358 the table size. */ 1359static size_t final_next_secondary; 1360 1361#endif 1362 1363/* Build a Huffman code table from an array of lengths in CODES of 1364 length CODES_LEN. 
The table is stored into *TABLE. ZDEBUG_TABLE 1365 is the same as for elf_zlib_inflate, used to find some work space. 1366 Returns 1 on success, 0 on error. */ 1367 1368static int 1369elf_zlib_inflate_table (unsigned char *codes, size_t codes_len, 1370 uint16_t *zdebug_table, uint16_t *table) 1371{ 1372 uint16_t count[16]; 1373 uint16_t start[16]; 1374 uint16_t prev[16]; 1375 uint16_t firstcode[7]; 1376 uint16_t *next; 1377 size_t i; 1378 size_t j; 1379 unsigned int code; 1380 size_t next_secondary; 1381 1382 /* Count the number of code of each length. Set NEXT[val] to be the 1383 next value after VAL with the same bit length. */ 1384 1385 next = (uint16_t *) (((unsigned char *) zdebug_table) 1386 + ZLIB_TABLE_WORK_OFFSET); 1387 1388 memset (&count[0], 0, 16 * sizeof (uint16_t)); 1389 for (i = 0; i < codes_len; ++i) 1390 { 1391 if (unlikely (codes[i] >= 16)) 1392 { 1393 elf_uncompress_failed (); 1394 return 0; 1395 } 1396 1397 if (count[codes[i]] == 0) 1398 { 1399 start[codes[i]] = i; 1400 prev[codes[i]] = i; 1401 } 1402 else 1403 { 1404 next[prev[codes[i]]] = i; 1405 prev[codes[i]] = i; 1406 } 1407 1408 ++count[codes[i]]; 1409 } 1410 1411 /* For each length, fill in the table for the codes of that 1412 length. */ 1413 1414 memset (table, 0, ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t)); 1415 1416 /* Handle the values that do not require a secondary table. */ 1417 1418 code = 0; 1419 for (j = 1; j <= 8; ++j) 1420 { 1421 unsigned int jcnt; 1422 unsigned int val; 1423 1424 jcnt = count[j]; 1425 if (jcnt == 0) 1426 continue; 1427 1428 if (unlikely (jcnt > (1U << j))) 1429 { 1430 elf_uncompress_failed (); 1431 return 0; 1432 } 1433 1434 /* There are JCNT values that have this length, the values 1435 starting from START[j] continuing through NEXT[VAL]. Those 1436 values are assigned consecutive values starting at CODE. 
*/ 1437 1438 val = start[j]; 1439 for (i = 0; i < jcnt; ++i) 1440 { 1441 uint16_t tval; 1442 size_t ind; 1443 unsigned int incr; 1444 1445 /* In the compressed bit stream, the value VAL is encoded as 1446 J bits with the value C. */ 1447 1448 if (unlikely ((val & ~ZLIB_HUFFMAN_VALUE_MASK) != 0)) 1449 { 1450 elf_uncompress_failed (); 1451 return 0; 1452 } 1453 1454 tval = val | ((j - 1) << ZLIB_HUFFMAN_BITS_SHIFT); 1455 1456 /* The table lookup uses 8 bits. If J is less than 8, we 1457 don't know what the other bits will be. We need to fill 1458 in all possibilities in the table. Since the Huffman 1459 code is unambiguous, those entries can't be used for any 1460 other code. */ 1461 1462 for (ind = code; ind < 0x100; ind += 1 << j) 1463 { 1464 if (unlikely (table[ind] != 0)) 1465 { 1466 elf_uncompress_failed (); 1467 return 0; 1468 } 1469 table[ind] = tval; 1470 } 1471 1472 /* Advance to the next value with this length. */ 1473 if (i + 1 < jcnt) 1474 val = next[val]; 1475 1476 /* The Huffman codes are stored in the bitstream with the 1477 most significant bit first, as is required to make them 1478 unambiguous. The effect is that when we read them from 1479 the bitstream we see the bit sequence in reverse order: 1480 the most significant bit of the Huffman code is the least 1481 significant bit of the value we read from the bitstream. 1482 That means that to make our table lookups work, we need 1483 to reverse the bits of CODE. Since reversing bits is 1484 tedious and in general requires using a table, we instead 1485 increment CODE in reverse order. That is, if the number 1486 of bits we are currently using, here named J, is 3, we 1487 count as 000, 100, 010, 110, 001, 101, 011, 111, which is 1488 to say the numbers from 0 to 7 but with the bits 1489 reversed. Going to more bits, aka incrementing J, 1490 effectively just adds more zero bits as the beginning, 1491 and as such does not change the numeric value of CODE. 
1492 1493 To increment CODE of length J in reverse order, find the 1494 most significant zero bit and set it to one while 1495 clearing all higher bits. In other words, add 1 modulo 1496 2^J, only reversed. */ 1497 1498 incr = 1U << (j - 1); 1499 while ((code & incr) != 0) 1500 incr >>= 1; 1501 if (incr == 0) 1502 code = 0; 1503 else 1504 { 1505 code &= incr - 1; 1506 code += incr; 1507 } 1508 } 1509 } 1510 1511 /* Handle the values that require a secondary table. */ 1512 1513 /* Set FIRSTCODE, the number at which the codes start, for each 1514 length. */ 1515 1516 for (j = 9; j < 16; j++) 1517 { 1518 unsigned int jcnt; 1519 unsigned int k; 1520 1521 jcnt = count[j]; 1522 if (jcnt == 0) 1523 continue; 1524 1525 /* There are JCNT values that have this length, the values 1526 starting from START[j]. Those values are assigned 1527 consecutive values starting at CODE. */ 1528 1529 firstcode[j - 9] = code; 1530 1531 /* Reverse add JCNT to CODE modulo 2^J. */ 1532 for (k = 0; k < j; ++k) 1533 { 1534 if ((jcnt & (1U << k)) != 0) 1535 { 1536 unsigned int m; 1537 unsigned int bit; 1538 1539 bit = 1U << (j - k - 1); 1540 for (m = 0; m < j - k; ++m, bit >>= 1) 1541 { 1542 if ((code & bit) == 0) 1543 { 1544 code += bit; 1545 break; 1546 } 1547 code &= ~bit; 1548 } 1549 jcnt &= ~(1U << k); 1550 } 1551 } 1552 if (unlikely (jcnt != 0)) 1553 { 1554 elf_uncompress_failed (); 1555 return 0; 1556 } 1557 } 1558 1559 /* For J from 9 to 15, inclusive, we store COUNT[J] consecutive 1560 values starting at START[J] with consecutive codes starting at 1561 FIRSTCODE[J - 9]. In the primary table we need to point to the 1562 secondary table, and the secondary table will be indexed by J - 9 1563 bits. We count down from 15 so that we install the larger 1564 secondary tables first, as the smaller ones may be embedded in 1565 the larger ones. */ 1566 1567 next_secondary = 0; /* Index of next secondary table (after primary). 
*/ 1568 for (j = 15; j >= 9; j--) 1569 { 1570 unsigned int jcnt; 1571 unsigned int val; 1572 size_t primary; /* Current primary index. */ 1573 size_t secondary; /* Offset to current secondary table. */ 1574 size_t secondary_bits; /* Bit size of current secondary table. */ 1575 1576 jcnt = count[j]; 1577 if (jcnt == 0) 1578 continue; 1579 1580 val = start[j]; 1581 code = firstcode[j - 9]; 1582 primary = 0x100; 1583 secondary = 0; 1584 secondary_bits = 0; 1585 for (i = 0; i < jcnt; ++i) 1586 { 1587 uint16_t tval; 1588 size_t ind; 1589 unsigned int incr; 1590 1591 if ((code & 0xff) != primary) 1592 { 1593 uint16_t tprimary; 1594 1595 /* Fill in a new primary table entry. */ 1596 1597 primary = code & 0xff; 1598 1599 tprimary = table[primary]; 1600 if (tprimary == 0) 1601 { 1602 /* Start a new secondary table. */ 1603 1604 if (unlikely ((next_secondary & ZLIB_HUFFMAN_VALUE_MASK) 1605 != next_secondary)) 1606 { 1607 elf_uncompress_failed (); 1608 return 0; 1609 } 1610 1611 secondary = next_secondary; 1612 secondary_bits = j - 8; 1613 next_secondary += 1 << secondary_bits; 1614 table[primary] = (secondary 1615 + ((j - 8) << ZLIB_HUFFMAN_BITS_SHIFT) 1616 + (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)); 1617 } 1618 else 1619 { 1620 /* There is an existing entry. It had better be a 1621 secondary table with enough bits. */ 1622 if (unlikely ((tprimary 1623 & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) 1624 == 0)) 1625 { 1626 elf_uncompress_failed (); 1627 return 0; 1628 } 1629 secondary = tprimary & ZLIB_HUFFMAN_VALUE_MASK; 1630 secondary_bits = ((tprimary >> ZLIB_HUFFMAN_BITS_SHIFT) 1631 & ZLIB_HUFFMAN_BITS_MASK); 1632 if (unlikely (secondary_bits < j - 8)) 1633 { 1634 elf_uncompress_failed (); 1635 return 0; 1636 } 1637 } 1638 } 1639 1640 /* Fill in secondary table entries. 
*/ 1641 1642 tval = val | ((j - 8) << ZLIB_HUFFMAN_BITS_SHIFT); 1643 1644 for (ind = code >> 8; 1645 ind < (1U << secondary_bits); 1646 ind += 1U << (j - 8)) 1647 { 1648 if (unlikely (table[secondary + 0x100 + ind] != 0)) 1649 { 1650 elf_uncompress_failed (); 1651 return 0; 1652 } 1653 table[secondary + 0x100 + ind] = tval; 1654 } 1655 1656 if (i + 1 < jcnt) 1657 val = next[val]; 1658 1659 incr = 1U << (j - 1); 1660 while ((code & incr) != 0) 1661 incr >>= 1; 1662 if (incr == 0) 1663 code = 0; 1664 else 1665 { 1666 code &= incr - 1; 1667 code += incr; 1668 } 1669 } 1670 } 1671 1672#ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE 1673 final_next_secondary = next_secondary; 1674#endif 1675 1676 return 1; 1677} 1678 1679#ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE 1680 1681/* Used to generate the fixed Huffman table for block type 1. */ 1682 1683#include <stdio.h> 1684 1685static uint16_t table[ZLIB_TABLE_SIZE]; 1686static unsigned char codes[288]; 1687 1688int 1689main () 1690{ 1691 size_t i; 1692 1693 for (i = 0; i <= 143; ++i) 1694 codes[i] = 8; 1695 for (i = 144; i <= 255; ++i) 1696 codes[i] = 9; 1697 for (i = 256; i <= 279; ++i) 1698 codes[i] = 7; 1699 for (i = 280; i <= 287; ++i) 1700 codes[i] = 8; 1701 if (!elf_zlib_inflate_table (&codes[0], 288, &table[0], &table[0])) 1702 { 1703 fprintf (stderr, "elf_zlib_inflate_table failed\n"); 1704 exit (EXIT_FAILURE); 1705 } 1706 1707 printf ("static const uint16_t elf_zlib_default_table[%#zx] =\n", 1708 final_next_secondary + 0x100); 1709 printf ("{\n"); 1710 for (i = 0; i < final_next_secondary + 0x100; i += 8) 1711 { 1712 size_t j; 1713 1714 printf (" "); 1715 for (j = i; j < final_next_secondary + 0x100 && j < i + 8; ++j) 1716 printf (" %#x,", table[j]); 1717 printf ("\n"); 1718 } 1719 printf ("};\n"); 1720 printf ("\n"); 1721 1722 for (i = 0; i < 32; ++i) 1723 codes[i] = 5; 1724 if (!elf_zlib_inflate_table (&codes[0], 32, &table[0], &table[0])) 1725 { 1726 fprintf (stderr, "elf_zlib_inflate_table failed\n"); 1727 exit 
(EXIT_FAILURE); 1728 } 1729 1730 printf ("static const uint16_t elf_zlib_default_dist_table[%#zx] =\n", 1731 final_next_secondary + 0x100); 1732 printf ("{\n"); 1733 for (i = 0; i < final_next_secondary + 0x100; i += 8) 1734 { 1735 size_t j; 1736 1737 printf (" "); 1738 for (j = i; j < final_next_secondary + 0x100 && j < i + 8; ++j) 1739 printf (" %#x,", table[j]); 1740 printf ("\n"); 1741 } 1742 printf ("};\n"); 1743 1744 return 0; 1745} 1746 1747#endif 1748 1749/* The fixed tables generated by the #ifdef'ed out main function 1750 above. */ 1751 1752static const uint16_t elf_zlib_default_table[0x170] = 1753{ 1754 0xd00, 0xe50, 0xe10, 0xf18, 0xd10, 0xe70, 0xe30, 0x1230, 1755 0xd08, 0xe60, 0xe20, 0x1210, 0xe00, 0xe80, 0xe40, 0x1250, 1756 0xd04, 0xe58, 0xe18, 0x1200, 0xd14, 0xe78, 0xe38, 0x1240, 1757 0xd0c, 0xe68, 0xe28, 0x1220, 0xe08, 0xe88, 0xe48, 0x1260, 1758 0xd02, 0xe54, 0xe14, 0xf1c, 0xd12, 0xe74, 0xe34, 0x1238, 1759 0xd0a, 0xe64, 0xe24, 0x1218, 0xe04, 0xe84, 0xe44, 0x1258, 1760 0xd06, 0xe5c, 0xe1c, 0x1208, 0xd16, 0xe7c, 0xe3c, 0x1248, 1761 0xd0e, 0xe6c, 0xe2c, 0x1228, 0xe0c, 0xe8c, 0xe4c, 0x1268, 1762 0xd01, 0xe52, 0xe12, 0xf1a, 0xd11, 0xe72, 0xe32, 0x1234, 1763 0xd09, 0xe62, 0xe22, 0x1214, 0xe02, 0xe82, 0xe42, 0x1254, 1764 0xd05, 0xe5a, 0xe1a, 0x1204, 0xd15, 0xe7a, 0xe3a, 0x1244, 1765 0xd0d, 0xe6a, 0xe2a, 0x1224, 0xe0a, 0xe8a, 0xe4a, 0x1264, 1766 0xd03, 0xe56, 0xe16, 0xf1e, 0xd13, 0xe76, 0xe36, 0x123c, 1767 0xd0b, 0xe66, 0xe26, 0x121c, 0xe06, 0xe86, 0xe46, 0x125c, 1768 0xd07, 0xe5e, 0xe1e, 0x120c, 0xd17, 0xe7e, 0xe3e, 0x124c, 1769 0xd0f, 0xe6e, 0xe2e, 0x122c, 0xe0e, 0xe8e, 0xe4e, 0x126c, 1770 0xd00, 0xe51, 0xe11, 0xf19, 0xd10, 0xe71, 0xe31, 0x1232, 1771 0xd08, 0xe61, 0xe21, 0x1212, 0xe01, 0xe81, 0xe41, 0x1252, 1772 0xd04, 0xe59, 0xe19, 0x1202, 0xd14, 0xe79, 0xe39, 0x1242, 1773 0xd0c, 0xe69, 0xe29, 0x1222, 0xe09, 0xe89, 0xe49, 0x1262, 1774 0xd02, 0xe55, 0xe15, 0xf1d, 0xd12, 0xe75, 0xe35, 0x123a, 1775 0xd0a, 0xe65, 0xe25, 0x121a, 0xe05, 0xe85, 0xe45, 0x125a, 
1776 0xd06, 0xe5d, 0xe1d, 0x120a, 0xd16, 0xe7d, 0xe3d, 0x124a, 1777 0xd0e, 0xe6d, 0xe2d, 0x122a, 0xe0d, 0xe8d, 0xe4d, 0x126a, 1778 0xd01, 0xe53, 0xe13, 0xf1b, 0xd11, 0xe73, 0xe33, 0x1236, 1779 0xd09, 0xe63, 0xe23, 0x1216, 0xe03, 0xe83, 0xe43, 0x1256, 1780 0xd05, 0xe5b, 0xe1b, 0x1206, 0xd15, 0xe7b, 0xe3b, 0x1246, 1781 0xd0d, 0xe6b, 0xe2b, 0x1226, 0xe0b, 0xe8b, 0xe4b, 0x1266, 1782 0xd03, 0xe57, 0xe17, 0xf1f, 0xd13, 0xe77, 0xe37, 0x123e, 1783 0xd0b, 0xe67, 0xe27, 0x121e, 0xe07, 0xe87, 0xe47, 0x125e, 1784 0xd07, 0xe5f, 0xe1f, 0x120e, 0xd17, 0xe7f, 0xe3f, 0x124e, 1785 0xd0f, 0xe6f, 0xe2f, 0x122e, 0xe0f, 0xe8f, 0xe4f, 0x126e, 1786 0x290, 0x291, 0x292, 0x293, 0x294, 0x295, 0x296, 0x297, 1787 0x298, 0x299, 0x29a, 0x29b, 0x29c, 0x29d, 0x29e, 0x29f, 1788 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x2a4, 0x2a5, 0x2a6, 0x2a7, 1789 0x2a8, 0x2a9, 0x2aa, 0x2ab, 0x2ac, 0x2ad, 0x2ae, 0x2af, 1790 0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4, 0x2b5, 0x2b6, 0x2b7, 1791 0x2b8, 0x2b9, 0x2ba, 0x2bb, 0x2bc, 0x2bd, 0x2be, 0x2bf, 1792 0x2c0, 0x2c1, 0x2c2, 0x2c3, 0x2c4, 0x2c5, 0x2c6, 0x2c7, 1793 0x2c8, 0x2c9, 0x2ca, 0x2cb, 0x2cc, 0x2cd, 0x2ce, 0x2cf, 1794 0x2d0, 0x2d1, 0x2d2, 0x2d3, 0x2d4, 0x2d5, 0x2d6, 0x2d7, 1795 0x2d8, 0x2d9, 0x2da, 0x2db, 0x2dc, 0x2dd, 0x2de, 0x2df, 1796 0x2e0, 0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x2e5, 0x2e6, 0x2e7, 1797 0x2e8, 0x2e9, 0x2ea, 0x2eb, 0x2ec, 0x2ed, 0x2ee, 0x2ef, 1798 0x2f0, 0x2f1, 0x2f2, 0x2f3, 0x2f4, 0x2f5, 0x2f6, 0x2f7, 1799 0x2f8, 0x2f9, 0x2fa, 0x2fb, 0x2fc, 0x2fd, 0x2fe, 0x2ff, 1800}; 1801 1802static const uint16_t elf_zlib_default_dist_table[0x100] = 1803{ 1804 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, 1805 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, 1806 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, 1807 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, 1808 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, 1809 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, 1810 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, 1811 
0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, 1812 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, 1813 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, 1814 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, 1815 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, 1816 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, 1817 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, 1818 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, 1819 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, 1820 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, 1821 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, 1822 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, 1823 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, 1824 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, 1825 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, 1826 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, 1827 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, 1828 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, 1829 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, 1830 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, 1831 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, 1832 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, 1833 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, 1834 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, 1835 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, 1836}; 1837 1838/* Inflate a zlib stream from PIN/SIN to POUT/SOUT. Return 1 on 1839 success, 0 on some error parsing the stream. 
*/ 1840 1841static int 1842elf_zlib_inflate (const unsigned char *pin, size_t sin, uint16_t *zdebug_table, 1843 unsigned char *pout, size_t sout) 1844{ 1845 unsigned char *porigout; 1846 const unsigned char *pinend; 1847 unsigned char *poutend; 1848 1849 /* We can apparently see multiple zlib streams concatenated 1850 together, so keep going as long as there is something to read. 1851 The last 4 bytes are the checksum. */ 1852 porigout = pout; 1853 pinend = pin + sin; 1854 poutend = pout + sout; 1855 while ((pinend - pin) > 4) 1856 { 1857 uint64_t val; 1858 unsigned int bits; 1859 int last; 1860 1861 /* Read the two byte zlib header. */ 1862 1863 if (unlikely ((pin[0] & 0xf) != 8)) /* 8 is zlib encoding. */ 1864 { 1865 /* Unknown compression method. */ 1866 elf_uncompress_failed (); 1867 return 0; 1868 } 1869 if (unlikely ((pin[0] >> 4) > 7)) 1870 { 1871 /* Window size too large. Other than this check, we don't 1872 care about the window size. */ 1873 elf_uncompress_failed (); 1874 return 0; 1875 } 1876 if (unlikely ((pin[1] & 0x20) != 0)) 1877 { 1878 /* Stream expects a predefined dictionary, but we have no 1879 dictionary. */ 1880 elf_uncompress_failed (); 1881 return 0; 1882 } 1883 val = (pin[0] << 8) | pin[1]; 1884 if (unlikely (val % 31 != 0)) 1885 { 1886 /* Header check failure. */ 1887 elf_uncompress_failed (); 1888 return 0; 1889 } 1890 pin += 2; 1891 1892 /* Align PIN to a 32-bit boundary. */ 1893 1894 val = 0; 1895 bits = 0; 1896 while ((((uintptr_t) pin) & 3) != 0) 1897 { 1898 val |= (uint64_t)*pin << bits; 1899 bits += 8; 1900 ++pin; 1901 } 1902 1903 /* Read blocks until one is marked last. */ 1904 1905 last = 0; 1906 1907 while (!last) 1908 { 1909 unsigned int type; 1910 const uint16_t *tlit; 1911 const uint16_t *tdist; 1912 1913 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 1914 return 0; 1915 1916 last = val & 1; 1917 type = (val >> 1) & 3; 1918 val >>= 3; 1919 bits -= 3; 1920 1921 if (unlikely (type == 3)) 1922 { 1923 /* Invalid block type. 
*/ 1924 elf_uncompress_failed (); 1925 return 0; 1926 } 1927 1928 if (type == 0) 1929 { 1930 uint16_t len; 1931 uint16_t lenc; 1932 1933 /* An uncompressed block. */ 1934 1935 /* If we've read ahead more than a byte, back up. */ 1936 while (bits >= 8) 1937 { 1938 --pin; 1939 bits -= 8; 1940 } 1941 1942 val = 0; 1943 bits = 0; 1944 if (unlikely ((pinend - pin) < 4)) 1945 { 1946 /* Missing length. */ 1947 elf_uncompress_failed (); 1948 return 0; 1949 } 1950 len = pin[0] | (pin[1] << 8); 1951 lenc = pin[2] | (pin[3] << 8); 1952 pin += 4; 1953 lenc = ~lenc; 1954 if (unlikely (len != lenc)) 1955 { 1956 /* Corrupt data. */ 1957 elf_uncompress_failed (); 1958 return 0; 1959 } 1960 if (unlikely (len > (unsigned int) (pinend - pin) 1961 || len > (unsigned int) (poutend - pout))) 1962 { 1963 /* Not enough space in buffers. */ 1964 elf_uncompress_failed (); 1965 return 0; 1966 } 1967 memcpy (pout, pin, len); 1968 pout += len; 1969 pin += len; 1970 1971 /* Align PIN. */ 1972 while ((((uintptr_t) pin) & 3) != 0) 1973 { 1974 val |= (uint64_t)*pin << bits; 1975 bits += 8; 1976 ++pin; 1977 } 1978 1979 /* Go around to read the next block. */ 1980 continue; 1981 } 1982 1983 if (type == 1) 1984 { 1985 tlit = elf_zlib_default_table; 1986 tdist = elf_zlib_default_dist_table; 1987 } 1988 else 1989 { 1990 unsigned int nlit; 1991 unsigned int ndist; 1992 unsigned int nclen; 1993 unsigned char codebits[19]; 1994 unsigned char *plenbase; 1995 unsigned char *plen; 1996 unsigned char *plenend; 1997 1998 /* Read a Huffman encoding table. The various magic 1999 numbers here are from RFC 1951. */ 2000 2001 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2002 return 0; 2003 2004 nlit = (val & 0x1f) + 257; 2005 val >>= 5; 2006 ndist = (val & 0x1f) + 1; 2007 val >>= 5; 2008 nclen = (val & 0xf) + 4; 2009 val >>= 4; 2010 bits -= 14; 2011 if (unlikely (nlit > 286 || ndist > 30)) 2012 { 2013 /* Values out of range. 
*/ 2014 elf_uncompress_failed (); 2015 return 0; 2016 } 2017 2018 /* Read and build the table used to compress the 2019 literal, length, and distance codes. */ 2020 2021 memset(&codebits[0], 0, 19); 2022 2023 /* There are always at least 4 elements in the 2024 table. */ 2025 2026 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2027 return 0; 2028 2029 codebits[16] = val & 7; 2030 codebits[17] = (val >> 3) & 7; 2031 codebits[18] = (val >> 6) & 7; 2032 codebits[0] = (val >> 9) & 7; 2033 val >>= 12; 2034 bits -= 12; 2035 2036 if (nclen == 4) 2037 goto codebitsdone; 2038 2039 codebits[8] = val & 7; 2040 val >>= 3; 2041 bits -= 3; 2042 2043 if (nclen == 5) 2044 goto codebitsdone; 2045 2046 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2047 return 0; 2048 2049 codebits[7] = val & 7; 2050 val >>= 3; 2051 bits -= 3; 2052 2053 if (nclen == 6) 2054 goto codebitsdone; 2055 2056 codebits[9] = val & 7; 2057 val >>= 3; 2058 bits -= 3; 2059 2060 if (nclen == 7) 2061 goto codebitsdone; 2062 2063 codebits[6] = val & 7; 2064 val >>= 3; 2065 bits -= 3; 2066 2067 if (nclen == 8) 2068 goto codebitsdone; 2069 2070 codebits[10] = val & 7; 2071 val >>= 3; 2072 bits -= 3; 2073 2074 if (nclen == 9) 2075 goto codebitsdone; 2076 2077 codebits[5] = val & 7; 2078 val >>= 3; 2079 bits -= 3; 2080 2081 if (nclen == 10) 2082 goto codebitsdone; 2083 2084 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2085 return 0; 2086 2087 codebits[11] = val & 7; 2088 val >>= 3; 2089 bits -= 3; 2090 2091 if (nclen == 11) 2092 goto codebitsdone; 2093 2094 codebits[4] = val & 7; 2095 val >>= 3; 2096 bits -= 3; 2097 2098 if (nclen == 12) 2099 goto codebitsdone; 2100 2101 codebits[12] = val & 7; 2102 val >>= 3; 2103 bits -= 3; 2104 2105 if (nclen == 13) 2106 goto codebitsdone; 2107 2108 codebits[3] = val & 7; 2109 val >>= 3; 2110 bits -= 3; 2111 2112 if (nclen == 14) 2113 goto codebitsdone; 2114 2115 codebits[13] = val & 7; 2116 val >>= 3; 2117 bits -= 3; 2118 2119 if (nclen == 15) 2120 goto codebitsdone; 2121 
2122 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2123 return 0; 2124 2125 codebits[2] = val & 7; 2126 val >>= 3; 2127 bits -= 3; 2128 2129 if (nclen == 16) 2130 goto codebitsdone; 2131 2132 codebits[14] = val & 7; 2133 val >>= 3; 2134 bits -= 3; 2135 2136 if (nclen == 17) 2137 goto codebitsdone; 2138 2139 codebits[1] = val & 7; 2140 val >>= 3; 2141 bits -= 3; 2142 2143 if (nclen == 18) 2144 goto codebitsdone; 2145 2146 codebits[15] = val & 7; 2147 val >>= 3; 2148 bits -= 3; 2149 2150 codebitsdone: 2151 2152 if (!elf_zlib_inflate_table (codebits, 19, zdebug_table, 2153 zdebug_table)) 2154 return 0; 2155 2156 /* Read the compressed bit lengths of the literal, 2157 length, and distance codes. We have allocated space 2158 at the end of zdebug_table to hold them. */ 2159 2160 plenbase = (((unsigned char *) zdebug_table) 2161 + ZLIB_TABLE_CODELEN_OFFSET); 2162 plen = plenbase; 2163 plenend = plen + nlit + ndist; 2164 while (plen < plenend) 2165 { 2166 uint16_t t; 2167 unsigned int b; 2168 uint16_t v; 2169 2170 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2171 return 0; 2172 2173 t = zdebug_table[val & 0xff]; 2174 2175 /* The compression here uses bit lengths up to 7, so 2176 a secondary table is never necessary. */ 2177 if (unlikely ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) 2178 != 0)) 2179 { 2180 elf_uncompress_failed (); 2181 return 0; 2182 } 2183 2184 b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK; 2185 val >>= b + 1; 2186 bits -= b + 1; 2187 2188 v = t & ZLIB_HUFFMAN_VALUE_MASK; 2189 if (v < 16) 2190 *plen++ = v; 2191 else if (v == 16) 2192 { 2193 unsigned int c; 2194 unsigned int prev; 2195 2196 /* Copy previous entry 3 to 6 times. */ 2197 2198 if (unlikely (plen == plenbase)) 2199 { 2200 elf_uncompress_failed (); 2201 return 0; 2202 } 2203 2204 /* We used up to 7 bits since the last 2205 elf_fetch_bits, so we have at least 8 bits 2206 available here. 
*/ 2207 2208 c = 3 + (val & 0x3); 2209 val >>= 2; 2210 bits -= 2; 2211 if (unlikely ((unsigned int) (plenend - plen) < c)) 2212 { 2213 elf_uncompress_failed (); 2214 return 0; 2215 } 2216 2217 prev = plen[-1]; 2218 switch (c) 2219 { 2220 case 6: 2221 *plen++ = prev; 2222 ATTRIBUTE_FALLTHROUGH; 2223 case 5: 2224 *plen++ = prev; 2225 ATTRIBUTE_FALLTHROUGH; 2226 case 4: 2227 *plen++ = prev; 2228 } 2229 *plen++ = prev; 2230 *plen++ = prev; 2231 *plen++ = prev; 2232 } 2233 else if (v == 17) 2234 { 2235 unsigned int c; 2236 2237 /* Store zero 3 to 10 times. */ 2238 2239 /* We used up to 7 bits since the last 2240 elf_fetch_bits, so we have at least 8 bits 2241 available here. */ 2242 2243 c = 3 + (val & 0x7); 2244 val >>= 3; 2245 bits -= 3; 2246 if (unlikely ((unsigned int) (plenend - plen) < c)) 2247 { 2248 elf_uncompress_failed (); 2249 return 0; 2250 } 2251 2252 switch (c) 2253 { 2254 case 10: 2255 *plen++ = 0; 2256 ATTRIBUTE_FALLTHROUGH; 2257 case 9: 2258 *plen++ = 0; 2259 ATTRIBUTE_FALLTHROUGH; 2260 case 8: 2261 *plen++ = 0; 2262 ATTRIBUTE_FALLTHROUGH; 2263 case 7: 2264 *plen++ = 0; 2265 ATTRIBUTE_FALLTHROUGH; 2266 case 6: 2267 *plen++ = 0; 2268 ATTRIBUTE_FALLTHROUGH; 2269 case 5: 2270 *plen++ = 0; 2271 ATTRIBUTE_FALLTHROUGH; 2272 case 4: 2273 *plen++ = 0; 2274 } 2275 *plen++ = 0; 2276 *plen++ = 0; 2277 *plen++ = 0; 2278 } 2279 else if (v == 18) 2280 { 2281 unsigned int c; 2282 2283 /* Store zero 11 to 138 times. */ 2284 2285 /* We used up to 7 bits since the last 2286 elf_fetch_bits, so we have at least 8 bits 2287 available here. */ 2288 2289 c = 11 + (val & 0x7f); 2290 val >>= 7; 2291 bits -= 7; 2292 if (unlikely ((unsigned int) (plenend - plen) < c)) 2293 { 2294 elf_uncompress_failed (); 2295 return 0; 2296 } 2297 2298 memset (plen, 0, c); 2299 plen += c; 2300 } 2301 else 2302 { 2303 elf_uncompress_failed (); 2304 return 0; 2305 } 2306 } 2307 2308 /* Make sure that the stop code can appear. 
*/ 2309 2310 plen = plenbase; 2311 if (unlikely (plen[256] == 0)) 2312 { 2313 elf_uncompress_failed (); 2314 return 0; 2315 } 2316 2317 /* Build the decompression tables. */ 2318 2319 if (!elf_zlib_inflate_table (plen, nlit, zdebug_table, 2320 zdebug_table)) 2321 return 0; 2322 if (!elf_zlib_inflate_table (plen + nlit, ndist, zdebug_table, 2323 (zdebug_table 2324 + ZLIB_HUFFMAN_TABLE_SIZE))) 2325 return 0; 2326 tlit = zdebug_table; 2327 tdist = zdebug_table + ZLIB_HUFFMAN_TABLE_SIZE; 2328 } 2329 2330 /* Inflate values until the end of the block. This is the 2331 main loop of the inflation code. */ 2332 2333 while (1) 2334 { 2335 uint16_t t; 2336 unsigned int b; 2337 uint16_t v; 2338 unsigned int lit; 2339 2340 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2341 return 0; 2342 2343 t = tlit[val & 0xff]; 2344 b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK; 2345 v = t & ZLIB_HUFFMAN_VALUE_MASK; 2346 2347 if ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) == 0) 2348 { 2349 lit = v; 2350 val >>= b + 1; 2351 bits -= b + 1; 2352 } 2353 else 2354 { 2355 t = tlit[v + 0x100 + ((val >> 8) & ((1U << b) - 1))]; 2356 b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK; 2357 lit = t & ZLIB_HUFFMAN_VALUE_MASK; 2358 val >>= b + 8; 2359 bits -= b + 8; 2360 } 2361 2362 if (lit < 256) 2363 { 2364 if (unlikely (pout == poutend)) 2365 { 2366 elf_uncompress_failed (); 2367 return 0; 2368 } 2369 2370 *pout++ = lit; 2371 2372 /* We will need to write the next byte soon. We ask 2373 for high temporal locality because we will write 2374 to the whole cache line soon. */ 2375 __builtin_prefetch (pout, 1, 3); 2376 } 2377 else if (lit == 256) 2378 { 2379 /* The end of the block. */ 2380 break; 2381 } 2382 else 2383 { 2384 unsigned int dist; 2385 unsigned int len; 2386 2387 /* Convert lit into a length. 
*/ 2388 2389 if (lit < 265) 2390 len = lit - 257 + 3; 2391 else if (lit == 285) 2392 len = 258; 2393 else if (unlikely (lit > 285)) 2394 { 2395 elf_uncompress_failed (); 2396 return 0; 2397 } 2398 else 2399 { 2400 unsigned int extra; 2401 2402 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2403 return 0; 2404 2405 /* This is an expression for the table of length 2406 codes in RFC 1951 3.2.5. */ 2407 lit -= 265; 2408 extra = (lit >> 2) + 1; 2409 len = (lit & 3) << extra; 2410 len += 11; 2411 len += ((1U << (extra - 1)) - 1) << 3; 2412 len += val & ((1U << extra) - 1); 2413 val >>= extra; 2414 bits -= extra; 2415 } 2416 2417 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2418 return 0; 2419 2420 t = tdist[val & 0xff]; 2421 b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK; 2422 v = t & ZLIB_HUFFMAN_VALUE_MASK; 2423 2424 if ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) == 0) 2425 { 2426 dist = v; 2427 val >>= b + 1; 2428 bits -= b + 1; 2429 } 2430 else 2431 { 2432 t = tdist[v + 0x100 + ((val >> 8) & ((1U << b) - 1))]; 2433 b = ((t >> ZLIB_HUFFMAN_BITS_SHIFT) 2434 & ZLIB_HUFFMAN_BITS_MASK); 2435 dist = t & ZLIB_HUFFMAN_VALUE_MASK; 2436 val >>= b + 8; 2437 bits -= b + 8; 2438 } 2439 2440 /* Convert dist to a distance. */ 2441 2442 if (dist == 0) 2443 { 2444 /* A distance of 1. A common case, meaning 2445 repeat the last character LEN times. 
*/ 2446 2447 if (unlikely (pout == porigout)) 2448 { 2449 elf_uncompress_failed (); 2450 return 0; 2451 } 2452 2453 if (unlikely ((unsigned int) (poutend - pout) < len)) 2454 { 2455 elf_uncompress_failed (); 2456 return 0; 2457 } 2458 2459 memset (pout, pout[-1], len); 2460 pout += len; 2461 } 2462 else if (unlikely (dist > 29)) 2463 { 2464 elf_uncompress_failed (); 2465 return 0; 2466 } 2467 else 2468 { 2469 if (dist < 4) 2470 dist = dist + 1; 2471 else 2472 { 2473 unsigned int extra; 2474 2475 if (!elf_fetch_bits (&pin, pinend, &val, &bits)) 2476 return 0; 2477 2478 /* This is an expression for the table of 2479 distance codes in RFC 1951 3.2.5. */ 2480 dist -= 4; 2481 extra = (dist >> 1) + 1; 2482 dist = (dist & 1) << extra; 2483 dist += 5; 2484 dist += ((1U << (extra - 1)) - 1) << 2; 2485 dist += val & ((1U << extra) - 1); 2486 val >>= extra; 2487 bits -= extra; 2488 } 2489 2490 /* Go back dist bytes, and copy len bytes from 2491 there. */ 2492 2493 if (unlikely ((unsigned int) (pout - porigout) < dist)) 2494 { 2495 elf_uncompress_failed (); 2496 return 0; 2497 } 2498 2499 if (unlikely ((unsigned int) (poutend - pout) < len)) 2500 { 2501 elf_uncompress_failed (); 2502 return 0; 2503 } 2504 2505 if (dist >= len) 2506 { 2507 memcpy (pout, pout - dist, len); 2508 pout += len; 2509 } 2510 else 2511 { 2512 while (len > 0) 2513 { 2514 unsigned int copy; 2515 2516 copy = len < dist ? len : dist; 2517 memcpy (pout, pout - dist, copy); 2518 len -= copy; 2519 pout += copy; 2520 } 2521 } 2522 } 2523 } 2524 } 2525 } 2526 } 2527 2528 /* We should have filled the output buffer. */ 2529 if (unlikely (pout != poutend)) 2530 { 2531 elf_uncompress_failed (); 2532 return 0; 2533 } 2534 2535 return 1; 2536} 2537 2538/* Verify the zlib checksum. The checksum is in the 4 bytes at 2539 CHECKBYTES, and the uncompressed data is at UNCOMPRESSED / 2540 UNCOMPRESSED_SIZE. Returns 1 on success, 0 on failure. 
*/

static int
elf_zlib_verify_checksum (const unsigned char *checkbytes,
                          const unsigned char *uncompressed,
                          size_t uncompressed_size)
{
  unsigned int expected;
  const unsigned char *ptr;
  uint32_t a;
  uint32_t b;
  size_t remaining;
  unsigned int i;

  /* The stored zlib checksum is big-endian.  */
  expected = 0;
  for (i = 0; i < 4; i++)
    expected = (expected << 8) | checkbytes[i];

  /* Compute the adler-32 checksum of the uncompressed data.  */
  a = 1;
  b = 0;

  ptr = uncompressed;
  remaining = uncompressed_size;

  /* Process runs of 5552 bytes before reducing: that is the largest
     count for which the 32-bit accumulators cannot overflow, so the
     modulo operations can be deferred.  */
  while (remaining >= 5552)
    {
      for (i = 0; i < 5552; i += 16)
        {
          unsigned int j;

          for (j = 0; j < 16; j++)
            {
              a = a + *ptr++;
              b = b + a;
            }
        }
      remaining -= 5552;
      a %= 65521;
      b %= 65521;
    }

  /* Handle the tail in chunks of 16 bytes.  */
  while (remaining >= 16)
    {
      unsigned int j;

      for (j = 0; j < 16; j++)
        {
          a = a + *ptr++;
          b = b + a;
        }
      remaining -= 16;
    }

  /* And finally byte by byte.  */
  while (remaining > 0)
    {
      a = a + *ptr++;
      b = b + a;
      --remaining;
    }

  a %= 65521;
  b %= 65521;

  if (unlikely ((b << 16) + a != expected))
    {
      elf_uncompress_failed ();
      return 0;
    }

  return 1;
}

/* Inflate a zlib stream from PIN/SIN to POUT/SOUT, and verify the
   checksum.  Return 1 on success, 0 on error.
*/ 2667 2668static int 2669elf_zlib_inflate_and_verify (const unsigned char *pin, size_t sin, 2670 uint16_t *zdebug_table, unsigned char *pout, 2671 size_t sout) 2672{ 2673 if (!elf_zlib_inflate (pin, sin, zdebug_table, pout, sout)) 2674 return 0; 2675 if (!elf_zlib_verify_checksum (pin + sin - 4, pout, sout)) 2676 return 0; 2677 return 1; 2678} 2679 2680/* For working memory during zstd compression, we need 2681 - a literal length FSE table: 512 64-bit values == 4096 bytes 2682 - a match length FSE table: 512 64-bit values == 4096 bytes 2683 - a offset FSE table: 256 64-bit values == 2048 bytes 2684 - a Huffman tree: 2048 uint16_t values == 4096 bytes 2685 - scratch space, one of 2686 - to build an FSE table: 512 uint16_t values == 1024 bytes 2687 - to build a Huffman tree: 512 uint16_t + 256 uint32_t == 2048 bytes 2688*/ 2689 2690#define ZSTD_TABLE_SIZE \ 2691 (2 * 512 * sizeof (struct elf_zstd_fse_baseline_entry) \ 2692 + 256 * sizeof (struct elf_zstd_fse_baseline_entry) \ 2693 + 2048 * sizeof (uint16_t) \ 2694 + 512 * sizeof (uint16_t) + 256 * sizeof (uint32_t)) 2695 2696#define ZSTD_TABLE_LITERAL_FSE_OFFSET (0) 2697 2698#define ZSTD_TABLE_MATCH_FSE_OFFSET \ 2699 (512 * sizeof (struct elf_zstd_fse_baseline_entry)) 2700 2701#define ZSTD_TABLE_OFFSET_FSE_OFFSET \ 2702 (ZSTD_TABLE_MATCH_FSE_OFFSET \ 2703 + 512 * sizeof (struct elf_zstd_fse_baseline_entry)) 2704 2705#define ZSTD_TABLE_HUFFMAN_OFFSET \ 2706 (ZSTD_TABLE_OFFSET_FSE_OFFSET \ 2707 + 256 * sizeof (struct elf_zstd_fse_baseline_entry)) 2708 2709#define ZSTD_TABLE_WORK_OFFSET \ 2710 (ZSTD_TABLE_HUFFMAN_OFFSET + 2048 * sizeof (uint16_t)) 2711 2712/* An entry in a zstd FSE table. */ 2713 2714struct elf_zstd_fse_entry 2715{ 2716 /* The value that this FSE entry represents. */ 2717 unsigned char symbol; 2718 /* The number of bits to read to determine the next state. */ 2719 unsigned char bits; 2720 /* Add the bits to this base to get the next state. 
   */
  uint16_t base;
};

static int
elf_zstd_build_fse (const int16_t *, int, uint16_t *, int,
                    struct elf_zstd_fse_entry *);

/* Read a zstd FSE table and build the decoding table in *TABLE, updating *PPIN
   as it reads.  ZDEBUG_TABLE is scratch space; it must be enough for 512
   uint16_t values (1024 bytes).  MAXIDX is the maximum number of symbols
   permitted.  *TABLE_BITS is the maximum number of bits for symbols in the
   table: the size of *TABLE is at least 1 << *TABLE_BITS.  This updates
   *TABLE_BITS to the actual number of bits.  Returns 1 on success, 0 on
   error.  */

static int
elf_zstd_read_fse (const unsigned char **ppin, const unsigned char *pinend,
                   uint16_t *zdebug_table, int maxidx,
                   struct elf_zstd_fse_entry *table, int *table_bits)
{
  const unsigned char *pin;
  int16_t *norm;
  uint16_t *next;
  uint64_t val;
  unsigned int bits;
  int accuracy_log;
  uint32_t remaining;
  uint32_t threshold;
  int bits_needed;
  int idx;
  int prev0;

  pin = *ppin;

  /* The normalized probabilities (NORM) and the scratch table for
     elf_zstd_build_fse (NEXT) both live in ZDEBUG_TABLE.  */
  norm = (int16_t *) zdebug_table;
  next = zdebug_table + 256;

  if (unlikely (pin + 3 >= pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Align PIN to a 32-bit boundary.  */

  val = 0;
  bits = 0;
  while ((((uintptr_t) pin) & 3) != 0)
    {
      val |= (uint64_t)*pin << bits;
      bits += 8;
      ++pin;
    }

  if (!elf_fetch_bits (&pin, pinend, &val, &bits))
    return 0;

  /* The low 4 bits encode the accuracy log, biased by 5.  */
  accuracy_log = (val & 0xf) + 5;
  if (accuracy_log > *table_bits)
    {
      elf_uncompress_failed ();
      return 0;
    }
  *table_bits = accuracy_log;
  val >>= 4;
  bits -= 4;

  /* This code is mostly copied from the reference implementation.  */

  /* The number of remaining probabilities, plus 1.  This sets the number of
     bits that need to be read for the next value.  */
  remaining = (1 << accuracy_log) + 1;

  /* The current difference between small and large values, which depends on
     the number of remaining values.  Small values use one less bit.  */
  threshold = 1 << accuracy_log;

  /* The number of bits used to compute threshold.  */
  bits_needed = accuracy_log + 1;

  /* The next character value.  */
  idx = 0;

  /* Whether the last count was 0.  */
  prev0 = 0;

  while (remaining > 1 && idx <= maxidx)
    {
      uint32_t max;
      int32_t count;

      if (!elf_fetch_bits (&pin, pinend, &val, &bits))
        return 0;

      if (prev0)
        {
          int zidx;

          /* Previous count was 0, so there is a 2-bit repeat flag.  If the
             2-bit flag is 0b11, it adds 3 and then there is another repeat
             flag.  */
          zidx = idx;
          /* Fast path: six consecutive 0b11 repeat flags at once.  */
          while ((val & 0xfff) == 0xfff)
            {
              zidx += 3 * 6;
              if (!elf_fetch_bits (&pin, pinend, &val, &bits))
                return 0;
              val >>= 12;
              bits -= 12;
            }
          while ((val & 3) == 3)
            {
              zidx += 3;
              if (!elf_fetch_bits (&pin, pinend, &val, &bits))
                return 0;
              val >>= 2;
              bits -= 2;
            }
          /* We have at least 13 bits here, don't need to fetch.  */
          zidx += val & 3;
          val >>= 2;
          bits -= 2;

          if (unlikely (zidx > maxidx))
            {
              elf_uncompress_failed ();
              return 0;
            }

          /* All the skipped symbols get probability zero.  */
          for (; idx < zidx; idx++)
            norm[idx] = 0;

          prev0 = 0;
          continue;
        }

      max = (2 * threshold - 1) - remaining;
      if ((val & (threshold - 1)) < max)
        {
          /* A small value.  */
          count = (int32_t) ((uint32_t) val & (threshold - 1));
          val >>= bits_needed - 1;
          bits -= bits_needed - 1;
        }
      else
        {
          /* A large value.  */
          count = (int32_t) ((uint32_t) val & (2 * threshold - 1));
          if (count >= (int32_t) threshold)
            count -= (int32_t) max;
          val >>= bits_needed;
          bits -= bits_needed;
        }

      /* COUNT is stored biased by one; -1 after the decrement means a
         "less than 1" probability, which consumes one slot.  */
      count--;
      if (count >= 0)
        remaining -= count;
      else
        remaining--;
      if (unlikely (idx >= 256))
        {
          elf_uncompress_failed ();
          return 0;
        }
      norm[idx] = (int16_t) count;
      ++idx;

      prev0 = count == 0;

      while (remaining < threshold)
        {
          bits_needed--;
          threshold >>= 1;
        }
    }

  if (unlikely (remaining != 1))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* If we've read ahead more than a byte, back up.  */
  while (bits >= 8)
    {
      --pin;
      bits -= 8;
    }

  *ppin = pin;

  /* Any remaining symbols get probability zero.  */
  for (; idx <= maxidx; idx++)
    norm[idx] = 0;

  return elf_zstd_build_fse (norm, idx, next, *table_bits, table);
}

/* Build the FSE decoding table from a list of probabilities.  This reads from
   NORM of length IDX, uses NEXT as scratch space, and writes to *TABLE, whose
   size is TABLE_BITS.
   */

static int
elf_zstd_build_fse (const int16_t *norm, int idx, uint16_t *next,
                    int table_bits, struct elf_zstd_fse_entry *table)
{
  int table_size;
  int high_threshold;
  int i;
  int pos;
  int step;
  int mask;

  table_size = 1 << table_bits;
  high_threshold = table_size - 1;
  /* First pass: "less than 1" probabilities (negative counts) each take
     one slot at the top of the table; record per-symbol start states in
     NEXT.  */
  for (i = 0; i < idx; i++)
    {
      int16_t n;

      n = norm[i];
      if (n >= 0)
        next[i] = (uint16_t) n;
      else
        {
          table[high_threshold].symbol = (unsigned char) i;
          high_threshold--;
          next[i] = 1;
        }
    }

  /* Second pass: spread the remaining symbols through the table, skipping
     the slots already taken above HIGH_THRESHOLD.  */
  pos = 0;
  step = (table_size >> 1) + (table_size >> 3) + 3;
  mask = table_size - 1;
  for (i = 0; i < idx; i++)
    {
      int n;
      int j;

      n = (int) norm[i];
      for (j = 0; j < n; j++)
        {
          table[pos].symbol = (unsigned char) i;
          pos = (pos + step) & mask;
          while (unlikely (pos > high_threshold))
            pos = (pos + step) & mask;
        }
    }
  /* The spread must have visited every slot exactly once and returned to
     position 0; anything else means corrupt probabilities.  */
  if (pos != 0)
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Third pass: for each table entry compute how many bits to read and
     the base of the next state.  */
  for (i = 0; i < table_size; i++)
    {
      unsigned char sym;
      uint16_t next_state;
      int high_bit;
      int bits;

      sym = table[i].symbol;
      next_state = next[sym];
      ++next[sym];

      if (next_state == 0)
        {
          elf_uncompress_failed ();
          return 0;
        }
      high_bit = 31 - __builtin_clz (next_state);

      bits = table_bits - high_bit;
      table[i].bits = (unsigned char) bits;
      table[i].base = (uint16_t) ((next_state << bits) - table_size);
    }

  return 1;
}

/* Encode the baseline and bits into a single 32-bit value.
 */

#define ZSTD_ENCODE_BASELINE_BITS(baseline, basebits)	\
  ((uint32_t)(baseline) | ((uint32_t)(basebits) << 24))

#define ZSTD_DECODE_BASELINE(baseline_basebits)	\
  ((uint32_t)(baseline_basebits) & 0xffffff)

#define ZSTD_DECODE_BASEBITS(baseline_basebits)	\
  ((uint32_t)(baseline_basebits) >> 24)

/* Given a literal length code, we need to read a number of bits and add that
   to a baseline.  For states 0 to 15 the baseline is the state and the number
   of bits is zero.  */

#define ZSTD_LITERAL_LENGTH_BASELINE_OFFSET (16)

/* Baseline/bits pairs for literal length codes 16 and up, from the
   literal length table in RFC 8878.  */
static const uint32_t elf_zstd_literal_length_base[] =
{
  ZSTD_ENCODE_BASELINE_BITS(16, 1),
  ZSTD_ENCODE_BASELINE_BITS(18, 1),
  ZSTD_ENCODE_BASELINE_BITS(20, 1),
  ZSTD_ENCODE_BASELINE_BITS(22, 1),
  ZSTD_ENCODE_BASELINE_BITS(24, 2),
  ZSTD_ENCODE_BASELINE_BITS(28, 2),
  ZSTD_ENCODE_BASELINE_BITS(32, 3),
  ZSTD_ENCODE_BASELINE_BITS(40, 3),
  ZSTD_ENCODE_BASELINE_BITS(48, 4),
  ZSTD_ENCODE_BASELINE_BITS(64, 6),
  ZSTD_ENCODE_BASELINE_BITS(128, 7),
  ZSTD_ENCODE_BASELINE_BITS(256, 8),
  ZSTD_ENCODE_BASELINE_BITS(512, 9),
  ZSTD_ENCODE_BASELINE_BITS(1024, 10),
  ZSTD_ENCODE_BASELINE_BITS(2048, 11),
  ZSTD_ENCODE_BASELINE_BITS(4096, 12),
  ZSTD_ENCODE_BASELINE_BITS(8192, 13),
  ZSTD_ENCODE_BASELINE_BITS(16384, 14),
  ZSTD_ENCODE_BASELINE_BITS(32768, 15),
  ZSTD_ENCODE_BASELINE_BITS(65536, 16)
};

/* The same applies to match length codes.  For states 0 to 31 the baseline is
   the state + 3 and the number of bits is zero.
 */

#define ZSTD_MATCH_LENGTH_BASELINE_OFFSET (32)

/* Baseline/bits pairs for match length codes 32 and up, from the match
   length table in RFC 8878.  */
static const uint32_t elf_zstd_match_length_base[] =
{
  ZSTD_ENCODE_BASELINE_BITS(35, 1),
  ZSTD_ENCODE_BASELINE_BITS(37, 1),
  ZSTD_ENCODE_BASELINE_BITS(39, 1),
  ZSTD_ENCODE_BASELINE_BITS(41, 1),
  ZSTD_ENCODE_BASELINE_BITS(43, 2),
  ZSTD_ENCODE_BASELINE_BITS(47, 2),
  ZSTD_ENCODE_BASELINE_BITS(51, 3),
  ZSTD_ENCODE_BASELINE_BITS(59, 3),
  ZSTD_ENCODE_BASELINE_BITS(67, 4),
  ZSTD_ENCODE_BASELINE_BITS(83, 4),
  ZSTD_ENCODE_BASELINE_BITS(99, 5),
  ZSTD_ENCODE_BASELINE_BITS(131, 7),
  ZSTD_ENCODE_BASELINE_BITS(259, 8),
  ZSTD_ENCODE_BASELINE_BITS(515, 9),
  ZSTD_ENCODE_BASELINE_BITS(1027, 10),
  ZSTD_ENCODE_BASELINE_BITS(2051, 11),
  ZSTD_ENCODE_BASELINE_BITS(4099, 12),
  ZSTD_ENCODE_BASELINE_BITS(8195, 13),
  ZSTD_ENCODE_BASELINE_BITS(16387, 14),
  ZSTD_ENCODE_BASELINE_BITS(32771, 15),
  ZSTD_ENCODE_BASELINE_BITS(65539, 16)
};

/* An entry in an FSE table used for literal/match/length values.  For these we
   have to map the symbol to a baseline value, and we have to read zero or more
   bits and add that value to the baseline value.  Rather than look the values
   up in a separate table, we grow the FSE table so that we get better memory
   caching.  */

struct elf_zstd_fse_baseline_entry
{
  /* The baseline for the value that this FSE entry represents.  */
  uint32_t baseline;
  /* The number of bits to read to add to the baseline.  */
  unsigned char basebits;
  /* The number of bits to read to determine the next state.  */
  unsigned char bits;
  /* Add the bits to this base to get the next state.  */
  uint16_t base;
};

/* Convert the literal length FSE table FSE_TABLE to an FSE baseline table at
   BASELINE_TABLE.  Note that FSE_TABLE and BASELINE_TABLE will overlap.
 */

static int
elf_zstd_make_literal_baseline_fse (
    const struct elf_zstd_fse_entry *fse_table,
    int table_bits,
    struct elf_zstd_fse_baseline_entry *baseline_table)
{
  size_t count;
  const struct elf_zstd_fse_entry *pfse;
  struct elf_zstd_fse_baseline_entry *pbaseline;

  /* Convert backward to avoid overlap.  */

  count = 1U << table_bits;
  pfse = fse_table + count;
  pbaseline = baseline_table + count;
  while (pfse > fse_table)
    {
      unsigned char symbol;
      unsigned char bits;
      uint16_t base;

      --pfse;
      --pbaseline;
      symbol = pfse->symbol;
      bits = pfse->bits;
      base = pfse->base;
      if (symbol < ZSTD_LITERAL_LENGTH_BASELINE_OFFSET)
        {
          /* Codes 0 to 15: the baseline is the code itself, no extra
             bits.  */
          pbaseline->baseline = (uint32_t)symbol;
          pbaseline->basebits = 0;
        }
      else
        {
          unsigned int idx;
          uint32_t basebits;

          /* 35 is the largest literal length code in RFC 8878.  */
          if (unlikely (symbol > 35))
            {
              elf_uncompress_failed ();
              return 0;
            }
          idx = symbol - ZSTD_LITERAL_LENGTH_BASELINE_OFFSET;
          basebits = elf_zstd_literal_length_base[idx];
          pbaseline->baseline = ZSTD_DECODE_BASELINE(basebits);
          pbaseline->basebits = ZSTD_DECODE_BASEBITS(basebits);
        }
      pbaseline->bits = bits;
      pbaseline->base = base;
    }

  return 1;
}

/* Convert the offset length FSE table FSE_TABLE to an FSE baseline table at
   BASELINE_TABLE.  Note that FSE_TABLE and BASELINE_TABLE will overlap.  */

static int
elf_zstd_make_offset_baseline_fse (
    const struct elf_zstd_fse_entry *fse_table,
    int table_bits,
    struct elf_zstd_fse_baseline_entry *baseline_table)
{
  size_t count;
  const struct elf_zstd_fse_entry *pfse;
  struct elf_zstd_fse_baseline_entry *pbaseline;

  /* Convert backward to avoid overlap.
   */

  count = 1U << table_bits;
  pfse = fse_table + count;
  pbaseline = baseline_table + count;
  while (pfse > fse_table)
    {
      unsigned char symbol;
      unsigned char bits;
      uint16_t base;

      --pfse;
      --pbaseline;
      symbol = pfse->symbol;
      bits = pfse->bits;
      base = pfse->base;
      if (unlikely (symbol > 31))
        {
          elf_uncompress_failed ();
          return 0;
        }

      /* The simple way to write this is

           pbaseline->baseline = (uint32_t)1 << symbol;
           pbaseline->basebits = symbol;

         That will give us an offset value that corresponds to the one
         described in the RFC.  However, for offset values > 3, we have to
         subtract 3.  And for offset values 1, 2, 3 we use a repeated offset.
         The baseline is always a power of 2, and is never 0, so for these low
         values we will see one entry that is baseline 1, basebits 0, and one
         entry that is baseline 2, basebits 1.  All other entries will have
         baseline >= 4 and basebits >= 2.

         So we can check for RFC offset <= 3 by checking for basebits <= 1.
         And that means that we can subtract 3 here and not worry about doing
         it in the hot loop.  */

      pbaseline->baseline = (uint32_t)1 << symbol;
      if (symbol >= 2)
        pbaseline->baseline -= 3;
      pbaseline->basebits = symbol;
      pbaseline->bits = bits;
      pbaseline->base = base;
    }

  return 1;
}

/* Convert the match length FSE table FSE_TABLE to an FSE baseline table at
   BASELINE_TABLE.  Note that FSE_TABLE and BASELINE_TABLE will overlap.  */

static int
elf_zstd_make_match_baseline_fse (
    const struct elf_zstd_fse_entry *fse_table,
    int table_bits,
    struct elf_zstd_fse_baseline_entry *baseline_table)
{
  size_t count;
  const struct elf_zstd_fse_entry *pfse;
  struct elf_zstd_fse_baseline_entry *pbaseline;

  /* Convert backward to avoid overlap.
   */

  count = 1U << table_bits;
  pfse = fse_table + count;
  pbaseline = baseline_table + count;
  while (pfse > fse_table)
    {
      unsigned char symbol;
      unsigned char bits;
      uint16_t base;

      --pfse;
      --pbaseline;
      symbol = pfse->symbol;
      bits = pfse->bits;
      base = pfse->base;
      if (symbol < ZSTD_MATCH_LENGTH_BASELINE_OFFSET)
        {
          /* Codes 0 to 31: the baseline is the code plus 3, no extra
             bits.  */
          pbaseline->baseline = (uint32_t)symbol + 3;
          pbaseline->basebits = 0;
        }
      else
        {
          unsigned int idx;
          uint32_t basebits;

          /* 52 is the largest match length code in RFC 8878.  */
          if (unlikely (symbol > 52))
            {
              elf_uncompress_failed ();
              return 0;
            }
          idx = symbol - ZSTD_MATCH_LENGTH_BASELINE_OFFSET;
          basebits = elf_zstd_match_length_base[idx];
          pbaseline->baseline = ZSTD_DECODE_BASELINE(basebits);
          pbaseline->basebits = ZSTD_DECODE_BASEBITS(basebits);
        }
      pbaseline->bits = bits;
      pbaseline->base = base;
    }

  return 1;
}

#ifdef BACKTRACE_GENERATE_ZSTD_FSE_TABLES

/* Used to generate the predefined FSE decoding tables for zstd.  */

#include <stdio.h>

/* These values are straight from RFC 8878.
 */

/* Predefined literal length code probabilities (RFC 8878).  */
static int16_t lit[36] =
{
   4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
  -1,-1,-1,-1
};

/* Predefined match length code probabilities (RFC 8878).  */
static int16_t match[53] =
{
   1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
  -1,-1,-1,-1,-1
};

/* Predefined offset code probabilities (RFC 8878).  */
static int16_t offset[29] =
{
  1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1
};

/* Scratch space for elf_zstd_build_fse.  */
static uint16_t next[256];

/* Print SIZE entries of TABLE as C initializer text, three per line.  */
static void
print_table (const struct elf_zstd_fse_baseline_entry *table, size_t size)
{
  size_t i;

  printf ("{\n");
  for (i = 0; i < size; i += 3)
    {
      int j;

      printf (" ");
      for (j = 0; j < 3 && i + j < size; ++j)
        printf (" { %u, %d, %d, %d },", table[i + j].baseline,
                table[i + j].basebits, table[i + j].bits,
                table[i + j].base);
      printf ("\n");
    }
  printf ("};\n");
}

/* Build the three predefined baseline FSE tables and print them as C
   source to stdout.  */
int
main ()
{
  struct elf_zstd_fse_entry lit_table[64];
  struct elf_zstd_fse_baseline_entry lit_baseline[64];
  struct elf_zstd_fse_entry match_table[64];
  struct elf_zstd_fse_baseline_entry match_baseline[64];
  struct elf_zstd_fse_entry offset_table[32];
  struct elf_zstd_fse_baseline_entry offset_baseline[32];

  if (!elf_zstd_build_fse (lit, sizeof lit / sizeof lit[0], next,
                           6, lit_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_literal_baseline_fse (lit_table, 6, lit_baseline))
    {
      fprintf (stderr, "elf_zstd_make_literal_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
          "elf_zstd_lit_table[64] =\n");
  print_table (lit_baseline,
               sizeof lit_baseline / sizeof lit_baseline[0]);
  printf ("\n");

  if (!elf_zstd_build_fse (match, sizeof match / sizeof match[0], next,
                           6, match_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_match_baseline_fse (match_table, 6, match_baseline))
    {
      fprintf (stderr, "elf_zstd_make_match_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
          "elf_zstd_match_table[64] =\n");
  print_table (match_baseline,
               sizeof match_baseline / sizeof match_baseline[0]);
  printf ("\n");

  if (!elf_zstd_build_fse (offset, sizeof offset / sizeof offset[0], next,
                           5, offset_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_offset_baseline_fse (offset_table, 5, offset_baseline))
    {
      fprintf (stderr, "elf_zstd_make_offset_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
          "elf_zstd_offset_table[32] =\n");
  print_table (offset_baseline,
               sizeof offset_baseline / sizeof offset_baseline[0]);
  printf ("\n");

  return 0;
}

#endif

/* The fixed tables generated by the #ifdef'ed out main function
   above.
 */

static const struct elf_zstd_fse_baseline_entry elf_zstd_lit_table[64] =
{
  { 0, 0, 4, 0 }, { 0, 0, 4, 16 }, { 1, 0, 5, 32 },
  { 3, 0, 5, 0 }, { 4, 0, 5, 0 }, { 6, 0, 5, 0 },
  { 7, 0, 5, 0 }, { 9, 0, 5, 0 }, { 10, 0, 5, 0 },
  { 12, 0, 5, 0 }, { 14, 0, 6, 0 }, { 16, 1, 5, 0 },
  { 20, 1, 5, 0 }, { 22, 1, 5, 0 }, { 28, 2, 5, 0 },
  { 32, 3, 5, 0 }, { 48, 4, 5, 0 }, { 64, 6, 5, 32 },
  { 128, 7, 5, 0 }, { 256, 8, 6, 0 }, { 1024, 10, 6, 0 },
  { 4096, 12, 6, 0 }, { 0, 0, 4, 32 }, { 1, 0, 4, 0 },
  { 2, 0, 5, 0 }, { 4, 0, 5, 32 }, { 5, 0, 5, 0 },
  { 7, 0, 5, 32 }, { 8, 0, 5, 0 }, { 10, 0, 5, 32 },
  { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 1, 5, 32 },
  { 18, 1, 5, 0 }, { 22, 1, 5, 32 }, { 24, 2, 5, 0 },
  { 32, 3, 5, 32 }, { 40, 3, 5, 0 }, { 64, 6, 4, 0 },
  { 64, 6, 4, 16 }, { 128, 7, 5, 32 }, { 512, 9, 6, 0 },
  { 2048, 11, 6, 0 }, { 0, 0, 4, 48 }, { 1, 0, 4, 16 },
  { 2, 0, 5, 32 }, { 3, 0, 5, 32 }, { 5, 0, 5, 32 },
  { 6, 0, 5, 32 }, { 8, 0, 5, 32 }, { 9, 0, 5, 32 },
  { 11, 0, 5, 32 }, { 12, 0, 5, 32 }, { 15, 0, 6, 0 },
  { 18, 1, 5, 32 }, { 20, 1, 5, 32 }, { 24, 2, 5, 32 },
  { 28, 2, 5, 32 }, { 40, 3, 5, 32 }, { 48, 4, 5, 32 },
  { 65536, 16, 6, 0 }, { 32768, 15, 6, 0 }, { 16384, 14, 6, 0 },
  { 8192, 13, 6, 0 },
};

static const struct elf_zstd_fse_baseline_entry elf_zstd_match_table[64] =
{
  { 3, 0, 6, 0 }, { 4, 0, 4, 0 }, { 5, 0, 5, 32 },
  { 6, 0, 5, 0 }, { 8, 0, 5, 0 }, { 9, 0, 5, 0 },
  { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 0, 6, 0 },
  { 19, 0, 6, 0 }, { 22, 0, 6, 0 }, { 25, 0, 6, 0 },
  { 28, 0, 6, 0 }, { 31, 0, 6, 0 }, { 34, 0, 6, 0 },
  { 37, 1, 6, 0 }, { 41, 1, 6, 0 }, { 47, 2, 6, 0 },
  { 59, 3, 6, 0 }, { 83, 4, 6, 0 }, { 131, 7, 6, 0 },
  { 515, 9, 6, 0 }, { 4, 0, 4, 16 }, { 5, 0, 4, 0 },
  { 6, 0, 5, 32 }, { 7, 0, 5, 0 }, { 9, 0, 5, 32 },
  { 10, 0, 5, 0 }, { 12, 0, 6, 0 }, { 15, 0, 6, 0 },
  { 18, 0, 6, 0 }, { 21, 0, 6, 0 }, { 24, 0, 6, 0 },
  { 27, 0, 6, 0 }, { 30, 0, 6, 0 }, { 33, 0, 6, 0 },
  { 35, 1, 6, 0 }, { 39, 1, 6, 0 }, { 43, 2, 6, 0 },
  { 51, 3, 6, 0 }, { 67, 4, 6, 0 }, { 99, 5, 6, 0 },
  { 259, 8, 6, 0 }, { 4, 0, 4, 32 }, { 4, 0, 4, 48 },
  { 5, 0, 4, 16 }, { 7, 0, 5, 32 }, { 8, 0, 5, 32 },
  { 10, 0, 5, 32 }, { 11, 0, 5, 32 }, { 14, 0, 6, 0 },
  { 17, 0, 6, 0 }, { 20, 0, 6, 0 }, { 23, 0, 6, 0 },
  { 26, 0, 6, 0 }, { 29, 0, 6, 0 }, { 32, 0, 6, 0 },
  { 65539, 16, 6, 0 }, { 32771, 15, 6, 0 }, { 16387, 14, 6, 0 },
  { 8195, 13, 6, 0 }, { 4099, 12, 6, 0 }, { 2051, 11, 6, 0 },
  { 1027, 10, 6, 0 },
};

static const struct elf_zstd_fse_baseline_entry elf_zstd_offset_table[32] =
{
  { 1, 0, 5, 0 }, { 64, 6, 4, 0 }, { 512, 9, 5, 0 },
  { 32768, 15, 5, 0 }, { 2097152, 21, 5, 0 }, { 8, 3, 5, 0 },
  { 128, 7, 4, 0 }, { 4096, 12, 5, 0 }, { 262144, 18, 5, 0 },
  { 8388608, 23, 5, 0 }, { 32, 5, 5, 0 }, { 256, 8, 4, 0 },
  { 16384, 14, 5, 0 }, { 1048576, 20, 5, 0 }, { 4, 2, 5, 0 },
  { 128, 7, 4, 16 }, { 2048, 11, 5, 0 }, { 131072, 17, 5, 0 },
  { 4194304, 22, 5, 0 }, { 16, 4, 5, 0 }, { 256, 8, 4, 16 },
  { 8192, 13, 5, 0 }, { 524288, 19, 5, 0 }, { 2, 1, 5, 0 },
  { 64, 6, 4, 16 }, { 1024, 10, 5, 0 }, { 65536, 16, 5, 0 },
  { 268435456, 28, 5, 0 }, { 134217728, 27, 5, 0 }, { 67108864, 26, 5, 0 },
  { 33554432, 25, 5, 0 }, { 16777216, 24, 5, 0 },
};

/* Read a zstd Huffman table and build the decoding table in *TABLE, reading
   and updating *PPIN.  This sets *PTABLE_BITS to the number of bits of the
   table, such that the table length is 1 << *PTABLE_BITS.  ZDEBUG_TABLE is
   scratch space; it must be enough for 512 uint16_t values + 256 32-bit values
   (2048 bytes).  Returns 1 on success, 0 on error.
*/ 3461 3462static int 3463elf_zstd_read_huff (const unsigned char **ppin, const unsigned char *pinend, 3464 uint16_t *zdebug_table, uint16_t *table, int *ptable_bits) 3465{ 3466 const unsigned char *pin; 3467 unsigned char hdr; 3468 unsigned char *weights; 3469 size_t count; 3470 uint32_t *weight_mark; 3471 size_t i; 3472 uint32_t weight_mask; 3473 size_t table_bits; 3474 3475 pin = *ppin; 3476 if (unlikely (pin >= pinend)) 3477 { 3478 elf_uncompress_failed (); 3479 return 0; 3480 } 3481 hdr = *pin; 3482 ++pin; 3483 3484 weights = (unsigned char *) zdebug_table; 3485 3486 if (hdr < 128) 3487 { 3488 /* Table is compressed using FSE. */ 3489 3490 struct elf_zstd_fse_entry *fse_table; 3491 int fse_table_bits; 3492 uint16_t *scratch; 3493 const unsigned char *pfse; 3494 const unsigned char *pback; 3495 uint64_t val; 3496 unsigned int bits; 3497 unsigned int state1, state2; 3498 3499 /* SCRATCH is used temporarily by elf_zstd_read_fse. It overlaps 3500 WEIGHTS. */ 3501 scratch = zdebug_table; 3502 fse_table = (struct elf_zstd_fse_entry *) (scratch + 512); 3503 fse_table_bits = 6; 3504 3505 pfse = pin; 3506 if (!elf_zstd_read_fse (&pfse, pinend, scratch, 255, fse_table, 3507 &fse_table_bits)) 3508 return 0; 3509 3510 if (unlikely (pin + hdr > pinend)) 3511 { 3512 elf_uncompress_failed (); 3513 return 0; 3514 } 3515 3516 /* We no longer need SCRATCH. Start recording weights. We need up to 3517 256 bytes of weights and 64 bytes of rank counts, so it won't overlap 3518 FSE_TABLE. */ 3519 3520 pback = pin + hdr - 1; 3521 3522 if (!elf_fetch_backward_init (&pback, pfse, &val, &bits)) 3523 return 0; 3524 3525 bits -= fse_table_bits; 3526 state1 = (val >> bits) & ((1U << fse_table_bits) - 1); 3527 bits -= fse_table_bits; 3528 state2 = (val >> bits) & ((1U << fse_table_bits) - 1); 3529 3530 /* There are two independent FSE streams, tracked by STATE1 and STATE2. 3531 We decode them alternately. 
*/ 3532 3533 count = 0; 3534 while (1) 3535 { 3536 struct elf_zstd_fse_entry *pt; 3537 uint64_t v; 3538 3539 pt = &fse_table[state1]; 3540 3541 if (unlikely (pin < pinend) && bits < pt->bits) 3542 { 3543 if (unlikely (count >= 254)) 3544 { 3545 elf_uncompress_failed (); 3546 return 0; 3547 } 3548 weights[count] = (unsigned char) pt->symbol; 3549 weights[count + 1] = (unsigned char) fse_table[state2].symbol; 3550 count += 2; 3551 break; 3552 } 3553 3554 if (unlikely (pt->bits == 0)) 3555 v = 0; 3556 else 3557 { 3558 if (!elf_fetch_bits_backward (&pback, pfse, &val, &bits)) 3559 return 0; 3560 3561 bits -= pt->bits; 3562 v = (val >> bits) & (((uint64_t)1 << pt->bits) - 1); 3563 } 3564 3565 state1 = pt->base + v; 3566 3567 if (unlikely (count >= 255)) 3568 { 3569 elf_uncompress_failed (); 3570 return 0; 3571 } 3572 3573 weights[count] = pt->symbol; 3574 ++count; 3575 3576 pt = &fse_table[state2]; 3577 3578 if (unlikely (pin < pinend && bits < pt->bits)) 3579 { 3580 if (unlikely (count >= 254)) 3581 { 3582 elf_uncompress_failed (); 3583 return 0; 3584 } 3585 weights[count] = (unsigned char) pt->symbol; 3586 weights[count + 1] = (unsigned char) fse_table[state1].symbol; 3587 count += 2; 3588 break; 3589 } 3590 3591 if (unlikely (pt->bits == 0)) 3592 v = 0; 3593 else 3594 { 3595 if (!elf_fetch_bits_backward (&pback, pfse, &val, &bits)) 3596 return 0; 3597 3598 bits -= pt->bits; 3599 v = (val >> bits) & (((uint64_t)1 << pt->bits) - 1); 3600 } 3601 3602 state2 = pt->base + v; 3603 3604 if (unlikely (count >= 255)) 3605 { 3606 elf_uncompress_failed (); 3607 return 0; 3608 } 3609 3610 weights[count] = pt->symbol; 3611 ++count; 3612 } 3613 3614 pin += hdr; 3615 } 3616 else 3617 { 3618 /* Table is not compressed. Each weight is 4 bits. 
*/ 3619 3620 count = hdr - 127; 3621 if (unlikely (pin + ((count + 1) / 2) >= pinend)) 3622 { 3623 elf_uncompress_failed (); 3624 return 0; 3625 } 3626 for (i = 0; i < count; i += 2) 3627 { 3628 unsigned char b; 3629 3630 b = *pin; 3631 ++pin; 3632 weights[i] = b >> 4; 3633 weights[i + 1] = b & 0xf; 3634 } 3635 } 3636 3637 weight_mark = (uint32_t *) (weights + 256); 3638 memset (weight_mark, 0, 12 * sizeof (uint32_t)); 3639 weight_mask = 0; 3640 for (i = 0; i < count; ++i) 3641 { 3642 unsigned char w; 3643 3644 w = weights[i]; 3645 if (unlikely (w > 12)) 3646 { 3647 elf_uncompress_failed (); 3648 return 0; 3649 } 3650 ++weight_mark[w]; 3651 if (w > 0) 3652 weight_mask += 1U << (w - 1); 3653 } 3654 if (unlikely (weight_mask == 0)) 3655 { 3656 elf_uncompress_failed (); 3657 return 0; 3658 } 3659 3660 table_bits = 32 - __builtin_clz (weight_mask); 3661 if (unlikely (table_bits > 11)) 3662 { 3663 elf_uncompress_failed (); 3664 return 0; 3665 } 3666 3667 /* Work out the last weight value, which is omitted because the weights must 3668 sum to a power of two. */ 3669 { 3670 uint32_t left; 3671 uint32_t high_bit; 3672 3673 left = ((uint32_t)1 << table_bits) - weight_mask; 3674 if (left == 0) 3675 { 3676 elf_uncompress_failed (); 3677 return 0; 3678 } 3679 high_bit = 31 - __builtin_clz (left); 3680 if (((uint32_t)1 << high_bit) != left) 3681 { 3682 elf_uncompress_failed (); 3683 return 0; 3684 } 3685 3686 if (unlikely (count >= 256)) 3687 { 3688 elf_uncompress_failed (); 3689 return 0; 3690 } 3691 3692 weights[count] = high_bit + 1; 3693 ++count; 3694 ++weight_mark[high_bit + 1]; 3695 } 3696 3697 if (weight_mark[1] < 2 || (weight_mark[1] & 1) != 0) 3698 { 3699 elf_uncompress_failed (); 3700 return 0; 3701 } 3702 3703 /* Change WEIGHT_MARK from a count of weights to the index of the first 3704 symbol for that weight. We shift the indexes to also store how many we 3705 hae seen so far, below. 
*/ 3706 { 3707 uint32_t next; 3708 3709 next = 0; 3710 for (i = 0; i < table_bits; ++i) 3711 { 3712 uint32_t cur; 3713 3714 cur = next; 3715 next += weight_mark[i + 1] << i; 3716 weight_mark[i + 1] = cur; 3717 } 3718 } 3719 3720 for (i = 0; i < count; ++i) 3721 { 3722 unsigned char weight; 3723 uint32_t length; 3724 uint16_t tval; 3725 size_t start; 3726 uint32_t j; 3727 3728 weight = weights[i]; 3729 if (weight == 0) 3730 continue; 3731 3732 length = 1U << (weight - 1); 3733 tval = (i << 8) | (table_bits + 1 - weight); 3734 start = weight_mark[weight]; 3735 for (j = 0; j < length; ++j) 3736 table[start + j] = tval; 3737 weight_mark[weight] += length; 3738 } 3739 3740 *ppin = pin; 3741 *ptable_bits = (int)table_bits; 3742 3743 return 1; 3744} 3745 3746/* Read and decompress the literals and store them ending at POUTEND. This 3747 works because we are going to use all the literals in the output, so they 3748 must fit into the output buffer. HUFFMAN_TABLE, and PHUFFMAN_TABLE_BITS 3749 store the Huffman table across calls. SCRATCH is used to read a Huffman 3750 table. Store the start of the decompressed literals in *PPLIT. Update 3751 *PPIN. Return 1 on success, 0 on error. 
*/ 3752 3753static int 3754elf_zstd_read_literals (const unsigned char **ppin, 3755 const unsigned char *pinend, 3756 unsigned char *pout, 3757 unsigned char *poutend, 3758 uint16_t *scratch, 3759 uint16_t *huffman_table, 3760 int *phuffman_table_bits, 3761 unsigned char **pplit) 3762{ 3763 const unsigned char *pin; 3764 unsigned char *plit; 3765 unsigned char hdr; 3766 uint32_t regenerated_size; 3767 uint32_t compressed_size; 3768 int streams; 3769 uint32_t total_streams_size; 3770 unsigned int huffman_table_bits; 3771 uint64_t huffman_mask; 3772 3773 pin = *ppin; 3774 if (unlikely (pin >= pinend)) 3775 { 3776 elf_uncompress_failed (); 3777 return 0; 3778 } 3779 hdr = *pin; 3780 ++pin; 3781 3782 if ((hdr & 3) == 0 || (hdr & 3) == 1) 3783 { 3784 int raw; 3785 3786 /* Raw_literals_Block or RLE_Literals_Block */ 3787 3788 raw = (hdr & 3) == 0; 3789 3790 switch ((hdr >> 2) & 3) 3791 { 3792 case 0: case 2: 3793 regenerated_size = hdr >> 3; 3794 break; 3795 case 1: 3796 if (unlikely (pin >= pinend)) 3797 { 3798 elf_uncompress_failed (); 3799 return 0; 3800 } 3801 regenerated_size = (hdr >> 4) + ((uint32_t)(*pin) << 4); 3802 ++pin; 3803 break; 3804 case 3: 3805 if (unlikely (pin + 1 >= pinend)) 3806 { 3807 elf_uncompress_failed (); 3808 return 0; 3809 } 3810 regenerated_size = ((hdr >> 4) 3811 + ((uint32_t)*pin << 4) 3812 + ((uint32_t)pin[1] << 12)); 3813 pin += 2; 3814 break; 3815 default: 3816 elf_uncompress_failed (); 3817 return 0; 3818 } 3819 3820 if (unlikely ((size_t)(poutend - pout) < regenerated_size)) 3821 { 3822 elf_uncompress_failed (); 3823 return 0; 3824 } 3825 3826 plit = poutend - regenerated_size; 3827 3828 if (raw) 3829 { 3830 if (unlikely (pin + regenerated_size >= pinend)) 3831 { 3832 elf_uncompress_failed (); 3833 return 0; 3834 } 3835 memcpy (plit, pin, regenerated_size); 3836 pin += regenerated_size; 3837 } 3838 else 3839 { 3840 if (pin >= pinend) 3841 { 3842 elf_uncompress_failed (); 3843 return 0; 3844 } 3845 memset (plit, *pin, 
regenerated_size); 3846 ++pin; 3847 } 3848 3849 *ppin = pin; 3850 *pplit = plit; 3851 3852 return 1; 3853 } 3854 3855 /* Compressed_Literals_Block or Treeless_Literals_Block */ 3856 3857 switch ((hdr >> 2) & 3) 3858 { 3859 case 0: case 1: 3860 if (unlikely (pin + 1 >= pinend)) 3861 { 3862 elf_uncompress_failed (); 3863 return 0; 3864 } 3865 regenerated_size = (hdr >> 4) | ((uint32_t)(*pin & 0x3f) << 4); 3866 compressed_size = (uint32_t)*pin >> 6 | ((uint32_t)pin[1] << 2); 3867 pin += 2; 3868 streams = ((hdr >> 2) & 3) == 0 ? 1 : 4; 3869 break; 3870 case 2: 3871 if (unlikely (pin + 2 >= pinend)) 3872 { 3873 elf_uncompress_failed (); 3874 return 0; 3875 } 3876 regenerated_size = (((uint32_t)hdr >> 4) 3877 | ((uint32_t)*pin << 4) 3878 | (((uint32_t)pin[1] & 3) << 12)); 3879 compressed_size = (((uint32_t)pin[1] >> 2) 3880 | ((uint32_t)pin[2] << 6)); 3881 pin += 3; 3882 streams = 4; 3883 break; 3884 case 3: 3885 if (unlikely (pin + 3 >= pinend)) 3886 { 3887 elf_uncompress_failed (); 3888 return 0; 3889 } 3890 regenerated_size = (((uint32_t)hdr >> 4) 3891 | ((uint32_t)*pin << 4) 3892 | (((uint32_t)pin[1] & 0x3f) << 12)); 3893 compressed_size = (((uint32_t)pin[1] >> 6) 3894 | ((uint32_t)pin[2] << 2) 3895 | ((uint32_t)pin[3] << 10)); 3896 pin += 4; 3897 streams = 4; 3898 break; 3899 default: 3900 elf_uncompress_failed (); 3901 return 0; 3902 } 3903 3904 if (unlikely (pin + compressed_size > pinend)) 3905 { 3906 elf_uncompress_failed (); 3907 return 0; 3908 } 3909 3910 pinend = pin + compressed_size; 3911 *ppin = pinend; 3912 3913 if (unlikely ((size_t)(poutend - pout) < regenerated_size)) 3914 { 3915 elf_uncompress_failed (); 3916 return 0; 3917 } 3918 3919 plit = poutend - regenerated_size; 3920 3921 *pplit = plit; 3922 3923 total_streams_size = compressed_size; 3924 if ((hdr & 3) == 2) 3925 { 3926 const unsigned char *ptable; 3927 3928 /* Compressed_Literals_Block. Read Huffman tree. 
*/ 3929 3930 ptable = pin; 3931 if (!elf_zstd_read_huff (&ptable, pinend, scratch, huffman_table, 3932 phuffman_table_bits)) 3933 return 0; 3934 3935 if (unlikely (total_streams_size < (size_t)(ptable - pin))) 3936 { 3937 elf_uncompress_failed (); 3938 return 0; 3939 } 3940 3941 total_streams_size -= ptable - pin; 3942 pin = ptable; 3943 } 3944 else 3945 { 3946 /* Treeless_Literals_Block. Reuse previous Huffman tree. */ 3947 if (unlikely (*phuffman_table_bits == 0)) 3948 { 3949 elf_uncompress_failed (); 3950 return 0; 3951 } 3952 } 3953 3954 /* Decompress COMPRESSED_SIZE bytes of data at PIN using the huffman table, 3955 storing REGENERATED_SIZE bytes of decompressed data at PLIT. */ 3956 3957 huffman_table_bits = (unsigned int)*phuffman_table_bits; 3958 huffman_mask = ((uint64_t)1 << huffman_table_bits) - 1; 3959 3960 if (streams == 1) 3961 { 3962 const unsigned char *pback; 3963 const unsigned char *pbackend; 3964 uint64_t val; 3965 unsigned int bits; 3966 uint32_t i; 3967 3968 pback = pin + compressed_size - 1; 3969 pbackend = pin; 3970 if (!elf_fetch_backward_init (&pback, pbackend, &val, &bits)) 3971 return 0; 3972 3973 /* This is one of the inner loops of the decompression algorithm, so we 3974 put some effort into optimization. We can't get more than 64 bytes 3975 from a single call to elf_fetch_bits_backward, and we can't subtract 3976 more than 11 bits at a time. 
*/ 3977 3978 if (regenerated_size >= 64) 3979 { 3980 unsigned char *plitstart; 3981 unsigned char *plitstop; 3982 3983 plitstart = plit; 3984 plitstop = plit + regenerated_size - 64; 3985 while (plit < plitstop) 3986 { 3987 uint16_t t; 3988 3989 if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits)) 3990 return 0; 3991 3992 if (bits < 16) 3993 break; 3994 3995 while (bits >= 33) 3996 { 3997 t = huffman_table[(val >> (bits - huffman_table_bits)) 3998 & huffman_mask]; 3999 *plit = t >> 8; 4000 ++plit; 4001 bits -= t & 0xff; 4002 4003 t = huffman_table[(val >> (bits - huffman_table_bits)) 4004 & huffman_mask]; 4005 *plit = t >> 8; 4006 ++plit; 4007 bits -= t & 0xff; 4008 4009 t = huffman_table[(val >> (bits - huffman_table_bits)) 4010 & huffman_mask]; 4011 *plit = t >> 8; 4012 ++plit; 4013 bits -= t & 0xff; 4014 } 4015 4016 while (bits > 11) 4017 { 4018 t = huffman_table[(val >> (bits - huffman_table_bits)) 4019 & huffman_mask]; 4020 *plit = t >> 8; 4021 ++plit; 4022 bits -= t & 0xff; 4023 } 4024 } 4025 4026 regenerated_size -= plit - plitstart; 4027 } 4028 4029 for (i = 0; i < regenerated_size; ++i) 4030 { 4031 uint16_t t; 4032 4033 if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits)) 4034 return 0; 4035 4036 if (unlikely (bits < huffman_table_bits)) 4037 { 4038 t = huffman_table[(val << (huffman_table_bits - bits)) 4039 & huffman_mask]; 4040 if (unlikely (bits < (t & 0xff))) 4041 { 4042 elf_uncompress_failed (); 4043 return 0; 4044 } 4045 } 4046 else 4047 t = huffman_table[(val >> (bits - huffman_table_bits)) 4048 & huffman_mask]; 4049 4050 *plit = t >> 8; 4051 ++plit; 4052 bits -= t & 0xff; 4053 } 4054 4055 return 1; 4056 } 4057 4058 { 4059 uint32_t stream_size1, stream_size2, stream_size3, stream_size4; 4060 uint32_t tot; 4061 const unsigned char *pback1, *pback2, *pback3, *pback4; 4062 const unsigned char *pbackend1, *pbackend2, *pbackend3, *pbackend4; 4063 uint64_t val1, val2, val3, val4; 4064 unsigned int bits1, bits2, bits3, bits4; 4065 
unsigned char *plit1, *plit2, *plit3, *plit4; 4066 uint32_t regenerated_stream_size; 4067 uint32_t regenerated_stream_size4; 4068 uint16_t t1, t2, t3, t4; 4069 uint32_t i; 4070 uint32_t limit; 4071 4072 /* Read jump table. */ 4073 if (unlikely (pin + 5 >= pinend)) 4074 { 4075 elf_uncompress_failed (); 4076 return 0; 4077 } 4078 stream_size1 = (uint32_t)*pin | ((uint32_t)pin[1] << 8); 4079 pin += 2; 4080 stream_size2 = (uint32_t)*pin | ((uint32_t)pin[1] << 8); 4081 pin += 2; 4082 stream_size3 = (uint32_t)*pin | ((uint32_t)pin[1] << 8); 4083 pin += 2; 4084 tot = stream_size1 + stream_size2 + stream_size3; 4085 if (unlikely (tot > total_streams_size - 6)) 4086 { 4087 elf_uncompress_failed (); 4088 return 0; 4089 } 4090 stream_size4 = total_streams_size - 6 - tot; 4091 4092 pback1 = pin + stream_size1 - 1; 4093 pbackend1 = pin; 4094 4095 pback2 = pback1 + stream_size2; 4096 pbackend2 = pback1 + 1; 4097 4098 pback3 = pback2 + stream_size3; 4099 pbackend3 = pback2 + 1; 4100 4101 pback4 = pback3 + stream_size4; 4102 pbackend4 = pback3 + 1; 4103 4104 if (!elf_fetch_backward_init (&pback1, pbackend1, &val1, &bits1)) 4105 return 0; 4106 if (!elf_fetch_backward_init (&pback2, pbackend2, &val2, &bits2)) 4107 return 0; 4108 if (!elf_fetch_backward_init (&pback3, pbackend3, &val3, &bits3)) 4109 return 0; 4110 if (!elf_fetch_backward_init (&pback4, pbackend4, &val4, &bits4)) 4111 return 0; 4112 4113 regenerated_stream_size = (regenerated_size + 3) / 4; 4114 4115 plit1 = plit; 4116 plit2 = plit1 + regenerated_stream_size; 4117 plit3 = plit2 + regenerated_stream_size; 4118 plit4 = plit3 + regenerated_stream_size; 4119 4120 regenerated_stream_size4 = regenerated_size - regenerated_stream_size * 3; 4121 4122 /* We can't get more than 64 literal bytes from a single call to 4123 elf_fetch_bits_backward. The fourth stream can be up to 3 bytes less, 4124 so use as the limit. */ 4125 4126 limit = regenerated_stream_size4 <= 64 ? 
0 : regenerated_stream_size4 - 64; 4127 i = 0; 4128 while (i < limit) 4129 { 4130 if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1)) 4131 return 0; 4132 if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2)) 4133 return 0; 4134 if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3)) 4135 return 0; 4136 if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4)) 4137 return 0; 4138 4139 /* We can't subtract more than 11 bits at a time. */ 4140 4141 do 4142 { 4143 t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits)) 4144 & huffman_mask]; 4145 t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits)) 4146 & huffman_mask]; 4147 t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits)) 4148 & huffman_mask]; 4149 t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits)) 4150 & huffman_mask]; 4151 4152 *plit1 = t1 >> 8; 4153 ++plit1; 4154 bits1 -= t1 & 0xff; 4155 4156 *plit2 = t2 >> 8; 4157 ++plit2; 4158 bits2 -= t2 & 0xff; 4159 4160 *plit3 = t3 >> 8; 4161 ++plit3; 4162 bits3 -= t3 & 0xff; 4163 4164 *plit4 = t4 >> 8; 4165 ++plit4; 4166 bits4 -= t4 & 0xff; 4167 4168 ++i; 4169 } 4170 while (bits1 > 11 && bits2 > 11 && bits3 > 11 && bits4 > 11); 4171 } 4172 4173 while (i < regenerated_stream_size) 4174 { 4175 int use4; 4176 4177 use4 = i < regenerated_stream_size4; 4178 4179 if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1)) 4180 return 0; 4181 if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2)) 4182 return 0; 4183 if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3)) 4184 return 0; 4185 if (use4) 4186 { 4187 if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4)) 4188 return 0; 4189 } 4190 4191 if (unlikely (bits1 < huffman_table_bits)) 4192 { 4193 t1 = huffman_table[(val1 << (huffman_table_bits - bits1)) 4194 & huffman_mask]; 4195 if (unlikely (bits1 < (t1 & 0xff))) 4196 { 4197 elf_uncompress_failed (); 4198 return 0; 4199 } 4200 } 4201 else 4202 t1 = huffman_table[(val1 >> 
(bits1 - huffman_table_bits)) 4203 & huffman_mask]; 4204 4205 if (unlikely (bits2 < huffman_table_bits)) 4206 { 4207 t2 = huffman_table[(val2 << (huffman_table_bits - bits2)) 4208 & huffman_mask]; 4209 if (unlikely (bits2 < (t2 & 0xff))) 4210 { 4211 elf_uncompress_failed (); 4212 return 0; 4213 } 4214 } 4215 else 4216 t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits)) 4217 & huffman_mask]; 4218 4219 if (unlikely (bits3 < huffman_table_bits)) 4220 { 4221 t3 = huffman_table[(val3 << (huffman_table_bits - bits3)) 4222 & huffman_mask]; 4223 if (unlikely (bits3 < (t3 & 0xff))) 4224 { 4225 elf_uncompress_failed (); 4226 return 0; 4227 } 4228 } 4229 else 4230 t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits)) 4231 & huffman_mask]; 4232 4233 if (use4) 4234 { 4235 if (unlikely (bits4 < huffman_table_bits)) 4236 { 4237 t4 = huffman_table[(val4 << (huffman_table_bits - bits4)) 4238 & huffman_mask]; 4239 if (unlikely (bits4 < (t4 & 0xff))) 4240 { 4241 elf_uncompress_failed (); 4242 return 0; 4243 } 4244 } 4245 else 4246 t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits)) 4247 & huffman_mask]; 4248 4249 *plit4 = t4 >> 8; 4250 ++plit4; 4251 bits4 -= t4 & 0xff; 4252 } 4253 4254 *plit1 = t1 >> 8; 4255 ++plit1; 4256 bits1 -= t1 & 0xff; 4257 4258 *plit2 = t2 >> 8; 4259 ++plit2; 4260 bits2 -= t2 & 0xff; 4261 4262 *plit3 = t3 >> 8; 4263 ++plit3; 4264 bits3 -= t3 & 0xff; 4265 4266 ++i; 4267 } 4268 } 4269 4270 return 1; 4271} 4272 4273/* The information used to decompress a sequence code, which can be a literal 4274 length, an offset, or a match length. */ 4275 4276struct elf_zstd_seq_decode 4277{ 4278 const struct elf_zstd_fse_baseline_entry *table; 4279 int table_bits; 4280}; 4281 4282/* Unpack a sequence code compression mode. 
*/ 4283 4284static int 4285elf_zstd_unpack_seq_decode (int mode, 4286 const unsigned char **ppin, 4287 const unsigned char *pinend, 4288 const struct elf_zstd_fse_baseline_entry *predef, 4289 int predef_bits, 4290 uint16_t *scratch, 4291 int maxidx, 4292 struct elf_zstd_fse_baseline_entry *table, 4293 int table_bits, 4294 int (*conv)(const struct elf_zstd_fse_entry *, 4295 int, 4296 struct elf_zstd_fse_baseline_entry *), 4297 struct elf_zstd_seq_decode *decode) 4298{ 4299 switch (mode) 4300 { 4301 case 0: 4302 decode->table = predef; 4303 decode->table_bits = predef_bits; 4304 break; 4305 4306 case 1: 4307 { 4308 struct elf_zstd_fse_entry entry; 4309 4310 if (unlikely (*ppin >= pinend)) 4311 { 4312 elf_uncompress_failed (); 4313 return 0; 4314 } 4315 entry.symbol = **ppin; 4316 ++*ppin; 4317 entry.bits = 0; 4318 entry.base = 0; 4319 decode->table_bits = 0; 4320 if (!conv (&entry, 0, table)) 4321 return 0; 4322 } 4323 break; 4324 4325 case 2: 4326 { 4327 struct elf_zstd_fse_entry *fse_table; 4328 4329 /* We use the same space for the simple FSE table and the baseline 4330 table. */ 4331 fse_table = (struct elf_zstd_fse_entry *)table; 4332 decode->table_bits = table_bits; 4333 if (!elf_zstd_read_fse (ppin, pinend, scratch, maxidx, fse_table, 4334 &decode->table_bits)) 4335 return 0; 4336 if (!conv (fse_table, decode->table_bits, table)) 4337 return 0; 4338 decode->table = table; 4339 } 4340 break; 4341 4342 case 3: 4343 if (unlikely (decode->table_bits == -1)) 4344 { 4345 elf_uncompress_failed (); 4346 return 0; 4347 } 4348 break; 4349 4350 default: 4351 elf_uncompress_failed (); 4352 return 0; 4353 } 4354 4355 return 1; 4356} 4357 4358/* Decompress a zstd stream from PIN/SIN to POUT/SOUT. Code based on RFC 8878. 4359 Return 1 on success, 0 on error. 
 */

static int
elf_zstd_decompress (const unsigned char *pin, size_t sin,
                     unsigned char *zdebug_table, unsigned char *pout,
                     size_t sout)
{
  const unsigned char *pinend;
  unsigned char *poutstart;
  unsigned char *poutend;
  struct elf_zstd_seq_decode literal_decode;
  struct elf_zstd_fse_baseline_entry *literal_fse_table;
  struct elf_zstd_seq_decode match_decode;
  struct elf_zstd_fse_baseline_entry *match_fse_table;
  struct elf_zstd_seq_decode offset_decode;
  struct elf_zstd_fse_baseline_entry *offset_fse_table;
  uint16_t *huffman_table;
  int huffman_table_bits;
  uint32_t repeated_offset1;
  uint32_t repeated_offset2;
  uint32_t repeated_offset3;
  uint16_t *scratch;
  unsigned char hdr;
  int has_checksum;
  uint64_t content_size;
  int last_block;

  pinend = pin + sin;
  poutstart = pout;
  poutend = pout + sout;

  /* The decode tables live in carved-out regions of ZDEBUG_TABLE.
     TABLE_BITS == -1 means "no table yet"; Repeat_Mode before any table
     has been read is rejected in elf_zstd_unpack_seq_decode.  */
  literal_decode.table = NULL;
  literal_decode.table_bits = -1;
  literal_fse_table = ((struct elf_zstd_fse_baseline_entry *)
                       (zdebug_table + ZSTD_TABLE_LITERAL_FSE_OFFSET));

  match_decode.table = NULL;
  match_decode.table_bits = -1;
  match_fse_table = ((struct elf_zstd_fse_baseline_entry *)
                     (zdebug_table + ZSTD_TABLE_MATCH_FSE_OFFSET));

  offset_decode.table = NULL;
  offset_decode.table_bits = -1;
  offset_fse_table = ((struct elf_zstd_fse_baseline_entry *)
                      (zdebug_table + ZSTD_TABLE_OFFSET_FSE_OFFSET));
  huffman_table = ((uint16_t *)
                   (zdebug_table + ZSTD_TABLE_HUFFMAN_OFFSET));
  huffman_table_bits = 0;
  scratch = ((uint16_t *)
             (zdebug_table + ZSTD_TABLE_WORK_OFFSET));

  /* Initial repeat offsets per RFC 8878.  */
  repeated_offset1 = 1;
  repeated_offset2 = 4;
  repeated_offset3 = 8;

  if (unlikely (sin < 4))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* These values are the zstd magic number (0xFD2FB528 little-endian).  */
  if (unlikely (pin[0] != 0x28
                || pin[1] != 0xb5
                || pin[2] != 0x2f
                || pin[3] != 0xfd))
    {
      elf_uncompress_failed ();
      return 0;
    }

  pin += 4;

  if (unlikely (pin >= pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Frame_Header_Descriptor byte.  */
  hdr = *pin++;

  /* We expect a single segment (Single_Segment_flag set: no
     Window_Descriptor, content size always present).  */
  if (unlikely ((hdr & (1 << 5)) == 0))
    {
      elf_uncompress_failed ();
      return 0;
    }
  /* Reserved bit must be zero.  */
  if (unlikely ((hdr & (1 << 3)) != 0))
    {
      elf_uncompress_failed ();
      return 0;
    }
  /* We do not expect a dictionary.  */
  if (unlikely ((hdr & 3) != 0))
    {
      elf_uncompress_failed ();
      return 0;
    }
  has_checksum = (hdr & (1 << 2)) != 0;
  /* Frame_Content_Size_flag selects a 1, 2, 4 or 8 byte content size.  */
  switch (hdr >> 6)
    {
    case 0:
      if (unlikely (pin >= pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }
      content_size = (uint64_t) *pin++;
      break;
    case 1:
      if (unlikely (pin + 1 >= pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }
      /* The two byte format is biased by 256 per the spec.  */
      content_size = (((uint64_t) pin[0]) | (((uint64_t) pin[1]) << 8)) + 256;
      pin += 2;
      break;
    case 2:
      if (unlikely (pin + 3 >= pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }
      content_size = ((uint64_t) pin[0]
                      | (((uint64_t) pin[1]) << 8)
                      | (((uint64_t) pin[2]) << 16)
                      | (((uint64_t) pin[3]) << 24));
      pin += 4;
      break;
    case 3:
      if (unlikely (pin + 7 >= pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }
      content_size = ((uint64_t) pin[0]
                      | (((uint64_t) pin[1]) << 8)
                      | (((uint64_t) pin[2]) << 16)
                      | (((uint64_t) pin[3]) << 24)
                      | (((uint64_t) pin[4]) << 32)
                      | (((uint64_t) pin[5]) << 40)
                      | (((uint64_t) pin[6]) << 48)
                      | (((uint64_t) pin[7]) << 56));
      pin += 8;
      break;
    default:
      elf_uncompress_failed ();
      return 0;
    }

  /* The declared content size must exactly match the caller's output
     buffer size (and fit in a size_t on this platform).  */
  if (unlikely (content_size != (size_t) content_size
                || (size_t) content_size != sout))
    {
      elf_uncompress_failed ();
      return 0;
    }

  last_block = 0;
  while (!last_block)
    {
      uint32_t block_hdr;
      int block_type;
      uint32_t block_size;

      /* 3-byte block header: bit 0 = Last_Block, bits 1-2 = Block_Type,
         bits 3-23 = Block_Size.  */
      if (unlikely (pin + 2 >= pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }
      block_hdr = ((uint32_t) pin[0]
                   | (((uint32_t) pin[1]) << 8)
                   | (((uint32_t) pin[2]) << 16));
      pin += 3;

      last_block = block_hdr & 1;
      block_type = (block_hdr >> 1) & 3;
      block_size = block_hdr >> 3;

      switch (block_type)
        {
        case 0:
          /* Raw_Block */
          if (unlikely ((size_t) block_size > (size_t) (pinend - pin)))
            {
              elf_uncompress_failed ();
              return 0;
            }
          if (unlikely ((size_t) block_size > (size_t) (poutend - pout)))
            {
              elf_uncompress_failed ();
              return 0;
            }
          memcpy (pout, pin, block_size);
          pout += block_size;
          pin += block_size;
          break;

        case 1:
          /* RLE_Block: one input byte repeated BLOCK_SIZE times.  */
          if (unlikely (pin >= pinend))
            {
              elf_uncompress_failed ();
              return 0;
            }
          if (unlikely ((size_t) block_size > (size_t) (poutend - pout)))
            {
              elf_uncompress_failed ();
              return 0;
            }
          memset (pout, *pin, block_size);
          pout += block_size;
          pin++;
          break;

        case 2:
          {
            const unsigned char *pblockend;
            unsigned char *plitstack;
            unsigned char *plit;
            uint32_t literal_count;
            unsigned char seq_hdr;
            size_t seq_count;
            size_t seq;
            const unsigned char *pback;
            uint64_t val;
            unsigned int bits;
            unsigned int literal_state;
            unsigned int offset_state;
            unsigned int match_state;

            /* Compressed_Block */
            if (unlikely ((size_t) block_size > (size_t) (pinend - pin)))
              {
                elf_uncompress_failed ();
                return 0;
              }

            pblockend = pin + block_size;

            /* Read the literals into the end of the output space, and leave
               PLIT pointing at them.  */

            if (!elf_zstd_read_literals (&pin, pblockend, pout, poutend,
                                         scratch, huffman_table,
                                         &huffman_table_bits,
                                         &plitstack))
              return 0;
            plit = plitstack;
            literal_count = poutend - plit;

            /* Number of sequences, in 1, 2 or 3 bytes.
               NOTE(review): no bounds check before this read; PIN could
               equal PBLOCKEND here -- confirm against upstream
               libbacktrace.  */
            seq_hdr = *pin;
            pin++;
            if (seq_hdr < 128)
              seq_count = seq_hdr;
            else if (seq_hdr < 255)
              {
                if (unlikely (pin >= pinend))
                  {
                    elf_uncompress_failed ();
                    return 0;
                  }
                seq_count = ((seq_hdr - 128) << 8) + *pin;
                pin++;
              }
            else
              {
                if (unlikely (pin + 1 >= pinend))
                  {
                    elf_uncompress_failed ();
                    return 0;
                  }
                seq_count = *pin + (pin[1] << 8) + 0x7f00;
                pin += 2;
              }

            if (seq_count > 0)
              {
                int (*pfn)(const struct elf_zstd_fse_entry *,
                           int, struct elf_zstd_fse_baseline_entry *);

                /* Symbol_Compression_Modes byte: bits 6-7 literal
                   lengths, bits 4-5 offsets, bits 2-3 match lengths.  */
                if (unlikely (pin >= pinend))
                  {
                    elf_uncompress_failed ();
                    return 0;
                  }
                seq_hdr = *pin;
                ++pin;

                pfn = elf_zstd_make_literal_baseline_fse;
                if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 6) & 3,
                                                 &pin, pinend,
                                                 &elf_zstd_lit_table[0], 6,
                                                 scratch, 35,
                                                 literal_fse_table, 9, pfn,
                                                 &literal_decode))
                  return 0;

                pfn = elf_zstd_make_offset_baseline_fse;
                if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 4) & 3,
                                                 &pin, pinend,
                                                 &elf_zstd_offset_table[0], 5,
                                                 scratch, 31,
                                                 offset_fse_table, 8, pfn,
                                                 &offset_decode))
                  return 0;

                pfn = elf_zstd_make_match_baseline_fse;
                if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 2) & 3,
                                                 &pin, pinend,
                                                 &elf_zstd_match_table[0], 6,
                                                 scratch, 52,
                                                 match_fse_table, 9, pfn,
                                                 &match_decode))
                  return 0;
              }

            /* The sequence bit stream is read backward from the end of
               the block.  Initial states are read in the order literal,
               offset, match, per the spec.  */
            pback = pblockend - 1;
            if (!elf_fetch_backward_init (&pback, pin, &val, &bits))
              return 0;

            bits -= literal_decode.table_bits;
            literal_state = ((val >> bits)
                             & ((1U << literal_decode.table_bits) - 1));

            if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
              return 0;
            bits -= offset_decode.table_bits;
            offset_state = ((val >> bits)
                            & ((1U << offset_decode.table_bits) - 1));

            if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
              return 0;
            bits -= match_decode.table_bits;
            match_state = ((val >> bits)
                           & ((1U << match_decode.table_bits) - 1));

            seq = 0;
            while (1)
              {
                const struct elf_zstd_fse_baseline_entry *pt;
                uint32_t offset_basebits;
                uint32_t offset_baseline;
                uint32_t offset_bits;
                uint32_t offset_base;
                uint32_t offset;
                uint32_t match_baseline;
                uint32_t match_bits;
                uint32_t match_base;
                uint32_t match;
                uint32_t literal_baseline;
                uint32_t literal_bits;
                uint32_t literal_base;
                uint32_t literal;
                uint32_t need;
                uint32_t add;

                /* Decode the offset value first; it may need up to 31
                   extra bits.  */
                pt = &offset_decode.table[offset_state];
                offset_basebits = pt->basebits;
                offset_baseline = pt->baseline;
                offset_bits = pt->bits;
                offset_base = pt->base;

                /* This case can be more than 16 bits, which is all that
                   elf_fetch_bits_backward promises.  */
                need = offset_basebits;
                add = 0;
                if (unlikely (need > 16))
                  {
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
                      return 0;
                    bits -= 16;
                    add = (val >> bits) & ((1U << 16) - 1);
                    need -= 16;
                    add <<= need;
                  }
                if (need > 0)
                  {
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
                      return 0;
                    bits -= need;
                    add += (val >> bits) & ((1U << need) - 1);
                  }

                offset = offset_baseline + add;

                /* Then the match length.  */
                pt = &match_decode.table[match_state];
                need = pt->basebits;
                match_baseline = pt->baseline;
                match_bits = pt->bits;
                match_base = pt->base;

                add = 0;
                if (need > 0)
                  {
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
                      return 0;
                    bits -= need;
                    add = (val >> bits) & ((1U << need) - 1);
                  }

                match = match_baseline + add;

                /* Then the literal length.  */
                pt = &literal_decode.table[literal_state];
                need = pt->basebits;
                literal_baseline = pt->baseline;
                literal_bits = pt->bits;
                literal_base = pt->base;

                add = 0;
                if (need > 0)
                  {
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
                      return 0;
                    bits -= need;
                    add = (val >> bits) & ((1U << need) - 1);
                  }

                literal = literal_baseline + add;

                /* Repeat-offset handling.  See the comment in
                   elf_zstd_make_offset_baseline_fse.  */
                if (offset_basebits > 1)
                  {
                    repeated_offset3 = repeated_offset2;
                    repeated_offset2 = repeated_offset1;
                    repeated_offset1 = offset;
                  }
                else
                  {
                    /* With a zero literal length the repeat offset
                       indexes shift by one, per the spec.  */
                    if (unlikely (literal == 0))
                      ++offset;
                    switch (offset)
                      {
                      case 1:
                        offset = repeated_offset1;
                        break;
                      case 2:
                        offset = repeated_offset2;
                        repeated_offset2 = repeated_offset1;
                        repeated_offset1 = offset;
                        break;
                      case 3:
                        offset = repeated_offset3;
                        repeated_offset3 = repeated_offset2;
                        repeated_offset2 = repeated_offset1;
                        repeated_offset1 = offset;
                        break;
                      case 4:
                        offset = repeated_offset1 - 1;
                        repeated_offset3 = repeated_offset2;
                        repeated_offset2 = repeated_offset1;
                        repeated_offset1 = offset;
                        break;
                      }
                  }

                ++seq;
                if (seq < seq_count)
                  {
                    uint32_t v;

                    /* Update the three states, in the order literal,
                       match, offset, per the spec.  */

                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
                      return 0;

                    need = literal_bits;
                    bits -= need;
                    v = (val >> bits) & (((uint32_t)1 << need) - 1);

                    literal_state = literal_base + v;

                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
                      return 0;

                    need = match_bits;
                    bits -= need;
                    v = (val >> bits) & (((uint32_t)1 << need) - 1);

                    match_state = match_base + v;

                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
                      return 0;

                    need = offset_bits;
                    bits -= need;
                    v = (val >> bits) & (((uint32_t)1 << need) - 1);

                    offset_state = offset_base + v;
                  }

                /* The next sequence is now in LITERAL, OFFSET, MATCH.  */

                /* Copy LITERAL bytes from the literals.  */

                if (unlikely ((size_t)(poutend - pout) < literal))
                  {
                    elf_uncompress_failed ();
                    return 0;
                  }

                if (unlikely (literal_count < literal))
                  {
                    elf_uncompress_failed ();
                    return 0;
                  }

                literal_count -= literal;

                /* Often LITERAL is small, so handle small cases quickly.  */
                switch (literal)
                  {
                  case 8:
                    *pout++ = *plit++;
                    /* FALLTHROUGH */
                  case 7:
                    *pout++ = *plit++;
                    /* FALLTHROUGH */
                  case 6:
                    *pout++ = *plit++;
                    /* FALLTHROUGH */
                  case 5:
                    *pout++ = *plit++;
                    /* FALLTHROUGH */
                  case 4:
                    *pout++ = *plit++;
                    /* FALLTHROUGH */
                  case 3:
                    *pout++ = *plit++;
                    /* FALLTHROUGH */
                  case 2:
                    *pout++ = *plit++;
                    /* FALLTHROUGH */
                  case 1:
                    *pout++ = *plit++;
                    break;

                  case 0:
                    break;

                  default:
                    /* The literals live at the end of the output buffer,
                       so a large copy may overlap its destination; copy
                       in non-overlapping chunks.  */
                    if (unlikely ((size_t)(plit - pout) < literal))
                      {
                        uint32_t move;

                        move = plit - pout;
                        while (literal > move)
                          {
                            memcpy (pout, plit, move);
                            pout += move;
                            plit += move;
                            literal -= move;
                          }
                      }

                    memcpy (pout, plit, literal);
                    pout += literal;
                    plit += literal;
                  }

                if (match > 0)
                  {
                    /* Copy MATCH bytes from the decoded output at OFFSET.  */

                    if (unlikely ((size_t)(poutend - pout) < match))
                      {
                        elf_uncompress_failed ();
                        return 0;
                      }

                    if (unlikely ((size_t)(pout - poutstart) < offset))
                      {
                        elf_uncompress_failed ();
                        return 0;
                      }

                    if (offset >= match)
                      {
                        memcpy (pout, pout - offset, match);
                        pout += match;
                      }
                    else
                      {
                        /* Overlapping copy: replicate OFFSET-byte chunks
                           so earlier output repeats (RLE-like match).  */
                        while (match > 0)
                          {
                            uint32_t copy;

                            copy = match < offset ? match : offset;
                            memcpy (pout, pout - offset, copy);
                            match -= copy;
                            pout += copy;
                          }
                      }
                  }

                if (unlikely (seq >= seq_count))
                  {
                    /* Copy remaining literals.  */
                    if (literal_count > 0 && plit != pout)
                      {
                        if (unlikely ((size_t)(poutend - pout)
                                      < literal_count))
                          {
                            elf_uncompress_failed ();
                            return 0;
                          }

                        /* Same potential overlap as above.  */
                        if ((size_t)(plit - pout) < literal_count)
                          {
                            uint32_t move;

                            move = plit - pout;
                            while (literal_count > move)
                              {
                                memcpy (pout, plit, move);
                                pout += move;
                                plit += move;
                                literal_count -= move;
                              }
                          }

                        memcpy (pout, plit, literal_count);
                      }

                    pout += literal_count;

                    break;
                  }
              }

            pin = pblockend;
          }
          break;

        case 3:
        default:
          elf_uncompress_failed ();
          return 0;
        }
    }

  if (has_checksum)
    {
      if (unlikely (pin + 4 > pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }

      /* We don't currently verify the checksum.  Currently running GNU ld with
         --compress-debug-sections=zstd does not seem to generate a
         checksum.  */

      pin += 4;
    }

  /* The whole input must have been consumed.  */
  if (pin != pinend)
    {
      elf_uncompress_failed ();
      return 0;
    }

  return 1;
}

/* Scratch space must be large enough for whichever decompressor is used.  */
#define ZDEBUG_TABLE_SIZE \
  (ZLIB_TABLE_SIZE > ZSTD_TABLE_SIZE ? ZLIB_TABLE_SIZE : ZSTD_TABLE_SIZE)

/* Uncompress the old compressed debug format, the one emitted by
   --compress-debug-sections=zlib-gnu.  The compressed data is in
   COMPRESSED / COMPRESSED_SIZE, and the function writes to
   *UNCOMPRESSED / *UNCOMPRESSED_SIZE.  ZDEBUG_TABLE is work space to
   hold Huffman tables.  Returns 0 on error, 1 on successful
   decompression or if something goes wrong.  In general we try to
   carry on, by returning 1, even if we can't decompress.
*/ 5035 5036static int 5037elf_uncompress_zdebug (struct backtrace_state *state, 5038 const unsigned char *compressed, size_t compressed_size, 5039 uint16_t *zdebug_table, 5040 backtrace_error_callback error_callback, void *data, 5041 unsigned char **uncompressed, size_t *uncompressed_size) 5042{ 5043 size_t sz; 5044 size_t i; 5045 unsigned char *po; 5046 5047 *uncompressed = NULL; 5048 *uncompressed_size = 0; 5049 5050 /* The format starts with the four bytes ZLIB, followed by the 8 5051 byte length of the uncompressed data in big-endian order, 5052 followed by a zlib stream. */ 5053 5054 if (compressed_size < 12 || memcmp (compressed, "ZLIB", 4) != 0) 5055 return 1; 5056 5057 sz = 0; 5058 for (i = 0; i < 8; i++) 5059 sz = (sz << 8) | compressed[i + 4]; 5060 5061 if (*uncompressed != NULL && *uncompressed_size >= sz) 5062 po = *uncompressed; 5063 else 5064 { 5065 po = (unsigned char *) backtrace_alloc (state, sz, error_callback, data); 5066 if (po == NULL) 5067 return 0; 5068 } 5069 5070 if (!elf_zlib_inflate_and_verify (compressed + 12, compressed_size - 12, 5071 zdebug_table, po, sz)) 5072 return 1; 5073 5074 *uncompressed = po; 5075 *uncompressed_size = sz; 5076 5077 return 1; 5078} 5079 5080/* Uncompress the new compressed debug format, the official standard 5081 ELF approach emitted by --compress-debug-sections=zlib-gabi. The 5082 compressed data is in COMPRESSED / COMPRESSED_SIZE, and the 5083 function writes to *UNCOMPRESSED / *UNCOMPRESSED_SIZE. 5084 ZDEBUG_TABLE is work space as for elf_uncompress_zdebug. Returns 0 5085 on error, 1 on successful decompression or if something goes wrong. 5086 In general we try to carry on, by returning 1, even if we can't 5087 decompress. 
*/ 5088 5089static int 5090elf_uncompress_chdr (struct backtrace_state *state, 5091 const unsigned char *compressed, size_t compressed_size, 5092 uint16_t *zdebug_table, 5093 backtrace_error_callback error_callback, void *data, 5094 unsigned char **uncompressed, size_t *uncompressed_size) 5095{ 5096 const b_elf_chdr *chdr; 5097 char *alc; 5098 size_t alc_len; 5099 unsigned char *po; 5100 5101 *uncompressed = NULL; 5102 *uncompressed_size = 0; 5103 5104 /* The format starts with an ELF compression header. */ 5105 if (compressed_size < sizeof (b_elf_chdr)) 5106 return 1; 5107 5108 chdr = (const b_elf_chdr *) compressed; 5109 5110 alc = NULL; 5111 alc_len = 0; 5112 if (*uncompressed != NULL && *uncompressed_size >= chdr->ch_size) 5113 po = *uncompressed; 5114 else 5115 { 5116 alc_len = chdr->ch_size; 5117 alc = (char*)backtrace_alloc (state, alc_len, error_callback, data); 5118 if (alc == NULL) 5119 return 0; 5120 po = (unsigned char *) alc; 5121 } 5122 5123 switch (chdr->ch_type) 5124 { 5125 case ELFCOMPRESS_ZLIB: 5126 if (!elf_zlib_inflate_and_verify (compressed + sizeof (b_elf_chdr), 5127 compressed_size - sizeof (b_elf_chdr), 5128 zdebug_table, po, chdr->ch_size)) 5129 goto skip; 5130 break; 5131 5132 case ELFCOMPRESS_ZSTD: 5133 if (!elf_zstd_decompress (compressed + sizeof (b_elf_chdr), 5134 compressed_size - sizeof (b_elf_chdr), 5135 (unsigned char *)zdebug_table, po, 5136 chdr->ch_size)) 5137 goto skip; 5138 break; 5139 5140 default: 5141 /* Unsupported compression algorithm. */ 5142 goto skip; 5143 } 5144 5145 *uncompressed = po; 5146 *uncompressed_size = chdr->ch_size; 5147 5148 return 1; 5149 5150 skip: 5151 if (alc != NULL && alc_len > 0) 5152 backtrace_free (state, alc, alc_len, error_callback, data); 5153 return 1; 5154} 5155 5156/* This function is a hook for testing the zlib support. It is only 5157 used by tests. 
*/ 5158 5159int 5160backtrace_uncompress_zdebug (struct backtrace_state *state, 5161 const unsigned char *compressed, 5162 size_t compressed_size, 5163 backtrace_error_callback error_callback, 5164 void *data, unsigned char **uncompressed, 5165 size_t *uncompressed_size) 5166{ 5167 uint16_t *zdebug_table; 5168 int ret; 5169 5170 zdebug_table = ((uint16_t *) backtrace_alloc (state, ZDEBUG_TABLE_SIZE, 5171 error_callback, data)); 5172 if (zdebug_table == NULL) 5173 return 0; 5174 ret = elf_uncompress_zdebug (state, compressed, compressed_size, 5175 zdebug_table, error_callback, data, 5176 uncompressed, uncompressed_size); 5177 backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE, 5178 error_callback, data); 5179 return ret; 5180} 5181 5182/* This function is a hook for testing the zstd support. It is only used by 5183 tests. */ 5184 5185int 5186backtrace_uncompress_zstd (struct backtrace_state *state, 5187 const unsigned char *compressed, 5188 size_t compressed_size, 5189 backtrace_error_callback error_callback, 5190 void *data, unsigned char *uncompressed, 5191 size_t uncompressed_size) 5192{ 5193 unsigned char *zdebug_table; 5194 int ret; 5195 5196 zdebug_table = ((unsigned char *) backtrace_alloc (state, ZDEBUG_TABLE_SIZE, 5197 error_callback, data)); 5198 if (zdebug_table == NULL) 5199 return 0; 5200 ret = elf_zstd_decompress (compressed, compressed_size, 5201 zdebug_table, uncompressed, uncompressed_size); 5202 backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE, 5203 error_callback, data); 5204 return ret; 5205} 5206 5207/* Number of LZMA states. */ 5208#define LZMA_STATES (12) 5209 5210/* Number of LZMA position states. The pb value of the property byte 5211 is the number of bits to include in these states, and the maximum 5212 value of pb is 4. */ 5213#define LZMA_POS_STATES (16) 5214 5215/* Number of LZMA distance states. These are used match distances 5216 with a short match length: up to 4 bytes. 
*/ 5217#define LZMA_DIST_STATES (4) 5218 5219/* Number of LZMA distance slots. LZMA uses six bits to encode larger 5220 match lengths, so 1 << 6 possible probabilities. */ 5221#define LZMA_DIST_SLOTS (64) 5222 5223/* LZMA distances 0 to 3 are encoded directly, larger values use a 5224 probability model. */ 5225#define LZMA_DIST_MODEL_START (4) 5226 5227/* The LZMA probability model ends at 14. */ 5228#define LZMA_DIST_MODEL_END (14) 5229 5230/* LZMA distance slots for distances less than 127. */ 5231#define LZMA_FULL_DISTANCES (128) 5232 5233/* LZMA uses four alignment bits. */ 5234#define LZMA_ALIGN_SIZE (16) 5235 5236/* LZMA match length is encoded with 4, 5, or 10 bits, some of which 5237 are already known. */ 5238#define LZMA_LEN_LOW_SYMBOLS (8) 5239#define LZMA_LEN_MID_SYMBOLS (8) 5240#define LZMA_LEN_HIGH_SYMBOLS (256) 5241 5242/* LZMA literal encoding. */ 5243#define LZMA_LITERAL_CODERS_MAX (16) 5244#define LZMA_LITERAL_CODER_SIZE (0x300) 5245 5246/* LZMA is based on a large set of probabilities, each managed 5247 independently. Each probability is an 11 bit number that we store 5248 in a uint16_t. We use a single large array of probabilities. */ 5249 5250/* Lengths of entries in the LZMA probabilities array. The names used 5251 here are copied from the Linux kernel implementation. 
*/ 5252 5253#define LZMA_PROB_IS_MATCH_LEN (LZMA_STATES * LZMA_POS_STATES) 5254#define LZMA_PROB_IS_REP_LEN LZMA_STATES 5255#define LZMA_PROB_IS_REP0_LEN LZMA_STATES 5256#define LZMA_PROB_IS_REP1_LEN LZMA_STATES 5257#define LZMA_PROB_IS_REP2_LEN LZMA_STATES 5258#define LZMA_PROB_IS_REP0_LONG_LEN (LZMA_STATES * LZMA_POS_STATES) 5259#define LZMA_PROB_DIST_SLOT_LEN (LZMA_DIST_STATES * LZMA_DIST_SLOTS) 5260#define LZMA_PROB_DIST_SPECIAL_LEN (LZMA_FULL_DISTANCES - LZMA_DIST_MODEL_END) 5261#define LZMA_PROB_DIST_ALIGN_LEN LZMA_ALIGN_SIZE 5262#define LZMA_PROB_MATCH_LEN_CHOICE_LEN 1 5263#define LZMA_PROB_MATCH_LEN_CHOICE2_LEN 1 5264#define LZMA_PROB_MATCH_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS) 5265#define LZMA_PROB_MATCH_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS) 5266#define LZMA_PROB_MATCH_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS 5267#define LZMA_PROB_REP_LEN_CHOICE_LEN 1 5268#define LZMA_PROB_REP_LEN_CHOICE2_LEN 1 5269#define LZMA_PROB_REP_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS) 5270#define LZMA_PROB_REP_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS) 5271#define LZMA_PROB_REP_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS 5272#define LZMA_PROB_LITERAL_LEN \ 5273 (LZMA_LITERAL_CODERS_MAX * LZMA_LITERAL_CODER_SIZE) 5274 5275/* Offsets into the LZMA probabilities array. This is mechanically 5276 generated from the above lengths. 
*/ 5277 5278#define LZMA_PROB_IS_MATCH_OFFSET 0 5279#define LZMA_PROB_IS_REP_OFFSET \ 5280 (LZMA_PROB_IS_MATCH_OFFSET + LZMA_PROB_IS_MATCH_LEN) 5281#define LZMA_PROB_IS_REP0_OFFSET \ 5282 (LZMA_PROB_IS_REP_OFFSET + LZMA_PROB_IS_REP_LEN) 5283#define LZMA_PROB_IS_REP1_OFFSET \ 5284 (LZMA_PROB_IS_REP0_OFFSET + LZMA_PROB_IS_REP0_LEN) 5285#define LZMA_PROB_IS_REP2_OFFSET \ 5286 (LZMA_PROB_IS_REP1_OFFSET + LZMA_PROB_IS_REP1_LEN) 5287#define LZMA_PROB_IS_REP0_LONG_OFFSET \ 5288 (LZMA_PROB_IS_REP2_OFFSET + LZMA_PROB_IS_REP2_LEN) 5289#define LZMA_PROB_DIST_SLOT_OFFSET \ 5290 (LZMA_PROB_IS_REP0_LONG_OFFSET + LZMA_PROB_IS_REP0_LONG_LEN) 5291#define LZMA_PROB_DIST_SPECIAL_OFFSET \ 5292 (LZMA_PROB_DIST_SLOT_OFFSET + LZMA_PROB_DIST_SLOT_LEN) 5293#define LZMA_PROB_DIST_ALIGN_OFFSET \ 5294 (LZMA_PROB_DIST_SPECIAL_OFFSET + LZMA_PROB_DIST_SPECIAL_LEN) 5295#define LZMA_PROB_MATCH_LEN_CHOICE_OFFSET \ 5296 (LZMA_PROB_DIST_ALIGN_OFFSET + LZMA_PROB_DIST_ALIGN_LEN) 5297#define LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET \ 5298 (LZMA_PROB_MATCH_LEN_CHOICE_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE_LEN) 5299#define LZMA_PROB_MATCH_LEN_LOW_OFFSET \ 5300 (LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE2_LEN) 5301#define LZMA_PROB_MATCH_LEN_MID_OFFSET \ 5302 (LZMA_PROB_MATCH_LEN_LOW_OFFSET + LZMA_PROB_MATCH_LEN_LOW_LEN) 5303#define LZMA_PROB_MATCH_LEN_HIGH_OFFSET \ 5304 (LZMA_PROB_MATCH_LEN_MID_OFFSET + LZMA_PROB_MATCH_LEN_MID_LEN) 5305#define LZMA_PROB_REP_LEN_CHOICE_OFFSET \ 5306 (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + LZMA_PROB_MATCH_LEN_HIGH_LEN) 5307#define LZMA_PROB_REP_LEN_CHOICE2_OFFSET \ 5308 (LZMA_PROB_REP_LEN_CHOICE_OFFSET + LZMA_PROB_REP_LEN_CHOICE_LEN) 5309#define LZMA_PROB_REP_LEN_LOW_OFFSET \ 5310 (LZMA_PROB_REP_LEN_CHOICE2_OFFSET + LZMA_PROB_REP_LEN_CHOICE2_LEN) 5311#define LZMA_PROB_REP_LEN_MID_OFFSET \ 5312 (LZMA_PROB_REP_LEN_LOW_OFFSET + LZMA_PROB_REP_LEN_LOW_LEN) 5313#define LZMA_PROB_REP_LEN_HIGH_OFFSET \ 5314 (LZMA_PROB_REP_LEN_MID_OFFSET + LZMA_PROB_REP_LEN_MID_LEN) 
5315#define LZMA_PROB_LITERAL_OFFSET \ 5316 (LZMA_PROB_REP_LEN_HIGH_OFFSET + LZMA_PROB_REP_LEN_HIGH_LEN) 5317 5318#define LZMA_PROB_TOTAL_COUNT \ 5319 (LZMA_PROB_LITERAL_OFFSET + LZMA_PROB_LITERAL_LEN) 5320 5321/* Check that the number of LZMA probabilities is the same as the 5322 Linux kernel implementation. */ 5323 5324#if LZMA_PROB_TOTAL_COUNT != 1846 + (1 << 4) * 0x300 5325 #error Wrong number of LZMA probabilities 5326#endif 5327 5328/* Expressions for the offset in the LZMA probabilities array of a 5329 specific probability. */ 5330 5331#define LZMA_IS_MATCH(state, pos) \ 5332 (LZMA_PROB_IS_MATCH_OFFSET + (state) * LZMA_POS_STATES + (pos)) 5333#define LZMA_IS_REP(state) \ 5334 (LZMA_PROB_IS_REP_OFFSET + (state)) 5335#define LZMA_IS_REP0(state) \ 5336 (LZMA_PROB_IS_REP0_OFFSET + (state)) 5337#define LZMA_IS_REP1(state) \ 5338 (LZMA_PROB_IS_REP1_OFFSET + (state)) 5339#define LZMA_IS_REP2(state) \ 5340 (LZMA_PROB_IS_REP2_OFFSET + (state)) 5341#define LZMA_IS_REP0_LONG(state, pos) \ 5342 (LZMA_PROB_IS_REP0_LONG_OFFSET + (state) * LZMA_POS_STATES + (pos)) 5343#define LZMA_DIST_SLOT(dist, slot) \ 5344 (LZMA_PROB_DIST_SLOT_OFFSET + (dist) * LZMA_DIST_SLOTS + (slot)) 5345#define LZMA_DIST_SPECIAL(dist) \ 5346 (LZMA_PROB_DIST_SPECIAL_OFFSET + (dist)) 5347#define LZMA_DIST_ALIGN(dist) \ 5348 (LZMA_PROB_DIST_ALIGN_OFFSET + (dist)) 5349#define LZMA_MATCH_LEN_CHOICE \ 5350 LZMA_PROB_MATCH_LEN_CHOICE_OFFSET 5351#define LZMA_MATCH_LEN_CHOICE2 \ 5352 LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET 5353#define LZMA_MATCH_LEN_LOW(pos, sym) \ 5354 (LZMA_PROB_MATCH_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym)) 5355#define LZMA_MATCH_LEN_MID(pos, sym) \ 5356 (LZMA_PROB_MATCH_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym)) 5357#define LZMA_MATCH_LEN_HIGH(sym) \ 5358 (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + (sym)) 5359#define LZMA_REP_LEN_CHOICE \ 5360 LZMA_PROB_REP_LEN_CHOICE_OFFSET 5361#define LZMA_REP_LEN_CHOICE2 \ 5362 LZMA_PROB_REP_LEN_CHOICE2_OFFSET 5363#define 
LZMA_REP_LEN_LOW(pos, sym) \ 5364 (LZMA_PROB_REP_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym)) 5365#define LZMA_REP_LEN_MID(pos, sym) \ 5366 (LZMA_PROB_REP_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym)) 5367#define LZMA_REP_LEN_HIGH(sym) \ 5368 (LZMA_PROB_REP_LEN_HIGH_OFFSET + (sym)) 5369#define LZMA_LITERAL(code, size) \ 5370 (LZMA_PROB_LITERAL_OFFSET + (code) * LZMA_LITERAL_CODER_SIZE + (size)) 5371 5372/* Read an LZMA varint from BUF, reading and updating *POFFSET, 5373 setting *VAL. Returns 0 on error, 1 on success. */ 5374 5375static int 5376elf_lzma_varint (const unsigned char *compressed, size_t compressed_size, 5377 size_t *poffset, uint64_t *val) 5378{ 5379 size_t off; 5380 int i; 5381 uint64_t v; 5382 unsigned char b; 5383 5384 off = *poffset; 5385 i = 0; 5386 v = 0; 5387 while (1) 5388 { 5389 if (unlikely (off >= compressed_size)) 5390 { 5391 elf_uncompress_failed (); 5392 return 0; 5393 } 5394 b = compressed[off]; 5395 v |= (b & 0x7f) << (i * 7); 5396 ++off; 5397 if ((b & 0x80) == 0) 5398 { 5399 *poffset = off; 5400 *val = v; 5401 return 1; 5402 } 5403 ++i; 5404 if (unlikely (i >= 9)) 5405 { 5406 elf_uncompress_failed (); 5407 return 0; 5408 } 5409 } 5410} 5411 5412/* Normalize the LZMA range decoder, pulling in an extra input byte if 5413 needed. */ 5414 5415static void 5416elf_lzma_range_normalize (const unsigned char *compressed, 5417 size_t compressed_size, size_t *poffset, 5418 uint32_t *prange, uint32_t *pcode) 5419{ 5420 if (*prange < (1U << 24)) 5421 { 5422 if (unlikely (*poffset >= compressed_size)) 5423 { 5424 /* We assume this will be caught elsewhere. */ 5425 elf_uncompress_failed (); 5426 return; 5427 } 5428 *prange <<= 8; 5429 *pcode <<= 8; 5430 *pcode += compressed[*poffset]; 5431 ++*poffset; 5432 } 5433} 5434 5435/* Read and return a single bit from the LZMA stream, reading and 5436 updating *PROB. Each bit comes from the range coder. 
*/

static int
elf_lzma_bit (const unsigned char *compressed, size_t compressed_size,
              uint16_t *prob, size_t *poffset, uint32_t *prange,
              uint32_t *pcode)
{
  uint32_t bound;

  /* Make sure at least 24 bits of range remain before splitting it.  */
  elf_lzma_range_normalize (compressed, compressed_size, poffset,
                            prange, pcode);
  /* *PROB is an 11-bit probability of the bit being 0; scale the
     current range by it to find the split point.  */
  bound = (*prange >> 11) * (uint32_t) *prob;
  if (*pcode < bound)
    {
      /* Decoded a 0 bit: keep the lower part of the range and adapt
         the probability upward by 1/32 of the remaining headroom.  */
      *prange = bound;
      *prob += ((1U << 11) - *prob) >> 5;
      return 0;
    }
  else
    {
      /* Decoded a 1 bit: take the upper part of the range and adapt
         the probability downward by 1/32.  */
      *prange -= bound;
      *pcode -= bound;
      *prob -= *prob >> 5;
      return 1;
    }
}

/* Read an integer of size BITS from the LZMA stream, most significant
   bit first.  The bits are predicted using PROBS.  */

static uint32_t
elf_lzma_integer (const unsigned char *compressed, size_t compressed_size,
                  uint16_t *probs, uint32_t bits, size_t *poffset,
                  uint32_t *prange, uint32_t *pcode)
{
  uint32_t sym;
  uint32_t i;

  /* SYM doubles as the running value and the index into the PROBS
     tree; the leading 1 bit is stripped off by the final subtract.  */
  sym = 1;
  for (i = 0; i < bits; i++)
    {
      int bit;

      bit = elf_lzma_bit (compressed, compressed_size, probs + sym, poffset,
                          prange, pcode);
      sym <<= 1;
      sym += bit;
    }
  return sym - (1 << bits);
}

/* Read an integer of size BITS from the LZMA stream, least
   significant bit first.  The bits are predicted using PROBS.  */

static uint32_t
elf_lzma_reverse_integer (const unsigned char *compressed,
                          size_t compressed_size, uint16_t *probs,
                          uint32_t bits, size_t *poffset, uint32_t *prange,
                          uint32_t *pcode)
{
  uint32_t sym;
  uint32_t val;
  uint32_t i;

  /* SYM indexes the probability tree as in elf_lzma_integer, while
     VAL accumulates the decoded bits in reversed (LSB-first) order.  */
  sym = 1;
  val = 0;
  for (i = 0; i < bits; i++)
    {
      int bit;

      bit = elf_lzma_bit (compressed, compressed_size, probs + sym, poffset,
                          prange, pcode);
      sym <<= 1;
      sym += bit;
      val += bit << i;
    }
  return val;
}

/* Read a length from the LZMA stream.  IS_REP picks either LZMA_MATCH
   or LZMA_REP probabilities.
*/

static uint32_t
elf_lzma_len (const unsigned char *compressed, size_t compressed_size,
              uint16_t *probs, int is_rep, unsigned int pos_state,
              size_t *poffset, uint32_t *prange, uint32_t *pcode)
{
  uint16_t *probs_choice;
  uint16_t *probs_sym;
  uint32_t bits;
  uint32_t len;

  /* Up to two choice bits select one of three length coders: low
     (base 2, 3 bits), mid (base 10, 3 bits) or high (base 18,
     8 bits).  */
  probs_choice = probs + (is_rep
                          ? LZMA_REP_LEN_CHOICE
                          : LZMA_MATCH_LEN_CHOICE);
  if (elf_lzma_bit (compressed, compressed_size, probs_choice, poffset,
                    prange, pcode))
    {
      probs_choice = probs + (is_rep
                              ? LZMA_REP_LEN_CHOICE2
                              : LZMA_MATCH_LEN_CHOICE2);
      if (elf_lzma_bit (compressed, compressed_size, probs_choice,
                        poffset, prange, pcode))
        {
          /* High coder: 8 bits on top of base length 2 + 8 + 8.  */
          probs_sym = probs + (is_rep
                               ? LZMA_REP_LEN_HIGH (0)
                               : LZMA_MATCH_LEN_HIGH (0));
          bits = 8;
          len = 2 + 8 + 8;
        }
      else
        {
          /* Mid coder: 3 bits on top of base length 2 + 8.  */
          probs_sym = probs + (is_rep
                               ? LZMA_REP_LEN_MID (pos_state, 0)
                               : LZMA_MATCH_LEN_MID (pos_state, 0));
          bits = 3;
          len = 2 + 8;
        }
    }
  else
    {
      /* Low coder: 3 bits on top of the minimum match length 2.  */
      probs_sym = probs + (is_rep
                           ? LZMA_REP_LEN_LOW (pos_state, 0)
                           : LZMA_MATCH_LEN_LOW (pos_state, 0));
      bits = 3;
      len = 2;
    }

  len += elf_lzma_integer (compressed, compressed_size, probs_sym, bits,
                           poffset, prange, pcode);
  return len;
}

/* Uncompress one LZMA block from a minidebug file.  The compressed
   data is at COMPRESSED + *POFFSET.  Update *POFFSET.  Store the data
   into the memory at UNCOMPRESSED, size UNCOMPRESSED_SIZE.  CHECK is
   the stream flag from the xz header.  Return 1 on successful
   decompression.
*/

static int
elf_uncompress_lzma_block (const unsigned char *compressed,
                           size_t compressed_size, unsigned char check,
                           uint16_t *probs, unsigned char *uncompressed,
                           size_t uncompressed_size, size_t *poffset)
{
  size_t off;
  size_t block_header_offset;
  size_t block_header_size;
  unsigned char block_flags;
  uint64_t header_compressed_size;
  uint64_t header_uncompressed_size;
  unsigned char lzma2_properties;
  uint32_t computed_crc;
  uint32_t stream_crc;
  size_t uncompressed_offset;
  size_t dict_start_offset;
  unsigned int lc;     /* literal context bits from the properties byte */
  unsigned int lp;     /* literal position bits from the properties byte */
  unsigned int pb;     /* position bits from the properties byte */
  uint32_t range;      /* range coder state */
  uint32_t code;       /* range coder state */
  uint32_t lstate;     /* LZMA state machine state, 0..11 */
  uint32_t dist[4];    /* most recent match distances, dist[0] newest */

  off = *poffset;
  block_header_offset = off;

  /* Block header size is a single byte.  */
  if (unlikely (off >= compressed_size))
    {
      elf_uncompress_failed ();
      return 0;
    }
  block_header_size = (compressed[off] + 1) * 4;
  if (unlikely (off + block_header_size > compressed_size))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Block flags.  */
  block_flags = compressed[off + 1];
  if (unlikely ((block_flags & 0x3c) != 0))
    {
      elf_uncompress_failed ();
      return 0;
    }

  off += 2;

  /* Optional compressed size.  */
  header_compressed_size = 0;
  if ((block_flags & 0x40) != 0)
    {
      *poffset = off;
      if (!elf_lzma_varint (compressed, compressed_size, poffset,
                            &header_compressed_size))
        return 0;
      off = *poffset;
    }

  /* Optional uncompressed size.  */
  header_uncompressed_size = 0;
  if ((block_flags & 0x80) != 0)
    {
      *poffset = off;
      if (!elf_lzma_varint (compressed, compressed_size, poffset,
                            &header_uncompressed_size))
        return 0;
      off = *poffset;
    }

  /* The recipe for creating a minidebug file is to run the xz program
     with no arguments, so we expect exactly one filter: lzma2.  */

  if (unlikely ((block_flags & 0x3) != 0))
    {
      elf_uncompress_failed ();
      return 0;
    }

  if (unlikely (off + 2 >= block_header_offset + block_header_size))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* The filter ID for LZMA2 is 0x21.  */
  if (unlikely (compressed[off] != 0x21))
    {
      elf_uncompress_failed ();
      return 0;
    }
  ++off;

  /* The size of the filter properties for LZMA2 is 1.  */
  if (unlikely (compressed[off] != 1))
    {
      elf_uncompress_failed ();
      return 0;
    }
  ++off;

  lzma2_properties = compressed[off];
  ++off;

  if (unlikely (lzma2_properties > 40))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* The properties describe the dictionary size, but we don't care
     what that is.  */

  /* Block header padding.  */
  if (unlikely (off + 4 > compressed_size))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Round up to a four-byte boundary.  */
  off = (off + 3) &~ (size_t) 3;

  if (unlikely (off + 4 > compressed_size))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Block header CRC.  */
  computed_crc = elf_crc32 (0, compressed + block_header_offset,
                            block_header_size - 4);
  stream_crc = (compressed[off]
                | (compressed[off + 1] << 8)
                | (compressed[off + 2] << 16)
                | (compressed[off + 3] << 24));
  if (unlikely (computed_crc != stream_crc))
    {
      elf_uncompress_failed ();
      return 0;
    }
  off += 4;

  /* Read a sequence of LZMA2 packets.  */

  uncompressed_offset = 0;
  dict_start_offset = 0;
  lc = 0;
  lp = 0;
  pb = 0;
  lstate = 0;
  while (off < compressed_size)
    {
      unsigned char control;

      range = 0xffffffff;
      code = 0;

      control = compressed[off];
      ++off;
      if (unlikely (control == 0))
        {
          /* End of packets.  */
          break;
        }

      if (control == 1 || control >= 0xe0)
        {
          /* Reset dictionary to empty.  */
          dict_start_offset = uncompressed_offset;
        }

      if (control < 0x80)
        {
          size_t chunk_size;

          /* The only valid values here are 1 or 2.  A 1 means to
             reset the dictionary (done above).  Then we see an
             uncompressed chunk.  */

          if (unlikely (control > 2))
            {
              elf_uncompress_failed ();
              return 0;
            }

          /* An uncompressed chunk is a two byte size followed by
             data.  */

          if (unlikely (off + 2 > compressed_size))
            {
              elf_uncompress_failed ();
              return 0;
            }

          /* The stored value is size - 1.  */
          chunk_size = compressed[off] << 8;
          chunk_size += compressed[off + 1];
          ++chunk_size;

          off += 2;

          if (unlikely (off + chunk_size > compressed_size))
            {
              elf_uncompress_failed ();
              return 0;
            }
          if (unlikely (uncompressed_offset + chunk_size > uncompressed_size))
            {
              elf_uncompress_failed ();
              return 0;
            }

          memcpy (uncompressed + uncompressed_offset, compressed + off,
                  chunk_size);
          uncompressed_offset += chunk_size;
          off += chunk_size;
        }
      else
        {
          size_t uncompressed_chunk_start;
          size_t uncompressed_chunk_size;
          size_t compressed_chunk_size;
          size_t limit;

          /* An LZMA chunk.  This starts with an uncompressed size and
             a compressed size.  */

          if (unlikely (off + 4 >= compressed_size))
            {
              elf_uncompress_failed ();
              return 0;
            }

          uncompressed_chunk_start = uncompressed_offset;

          /* 5 high bits of the uncompressed size come from the
             control byte; both sizes are stored minus one.  */
          uncompressed_chunk_size = (control & 0x1f) << 16;
          uncompressed_chunk_size += compressed[off] << 8;
          uncompressed_chunk_size += compressed[off + 1];
          ++uncompressed_chunk_size;

          compressed_chunk_size = compressed[off + 2] << 8;
          compressed_chunk_size += compressed[off + 3];
          ++compressed_chunk_size;

          off += 4;

          /* Bit 7 (0x80) is set.
             Bits 6 and 5 (0x40 and 0x20) are as follows:
             0: don't reset anything
             1: reset state
             2: reset state, read properties
             3: reset state, read properties, reset dictionary (done above) */

          if (control >= 0xc0)
            {
              unsigned char props;

              /* Bit 6 is set, read properties.  */

              if (unlikely (off >= compressed_size))
                {
                  elf_uncompress_failed ();
                  return 0;
                }
              props = compressed[off];
              ++off;
              if (unlikely (props > (4 * 5 + 4) * 9 + 8))
                {
                  elf_uncompress_failed ();
                  return 0;
                }
              /* Decode props = (pb * 5 + lp) * 9 + lc.  */
              pb = 0;
              while (props >= 9 * 5)
                {
                  props -= 9 * 5;
                  ++pb;
                }
              lp = 0;
              while (props > 9)
                {
                  props -= 9;
                  ++lp;
                }
              lc = props;
              if (unlikely (lc + lp > 4))
                {
                  elf_uncompress_failed ();
                  return 0;
                }
            }

          if (control >= 0xa0)
            {
              size_t i;

              /* Bit 5 or 6 is set, reset LZMA state.  */

              lstate = 0;
              memset (&dist, 0, sizeof dist);
              /* All probabilities start at 0.5 (1 << 10 out of
                 1 << 11).  */
              for (i = 0; i < LZMA_PROB_TOTAL_COUNT; i++)
                probs[i] = 1 << 10;
              range = 0xffffffff;
              code = 0;
            }

          /* Read the range code.  */

          if (unlikely (off + 5 > compressed_size))
            {
              elf_uncompress_failed ();
              return 0;
            }

          /* The byte at compressed[off] is ignored for some
             reason.  */

          code = ((compressed[off + 1] << 24)
                  + (compressed[off + 2] << 16)
                  + (compressed[off + 3] << 8)
                  + compressed[off + 4]);
          off += 5;

          /* This is the main LZMA decode loop.  */

          limit = off + compressed_chunk_size;
          *poffset = off;
          while (*poffset < limit)
            {
              unsigned int pos_state;

              if (unlikely (uncompressed_offset
                            == (uncompressed_chunk_start
                                + uncompressed_chunk_size)))
                {
                  /* We've decompressed all the expected bytes.  */
                  break;
                }

              /* Position within the dictionary, reduced to pb bits.  */
              pos_state = ((uncompressed_offset - dict_start_offset)
                           & ((1 << pb) - 1));

              if (elf_lzma_bit (compressed, compressed_size,
                                probs + LZMA_IS_MATCH (lstate, pos_state),
                                poffset, &range, &code))
                {
                  uint32_t len;

                  if (elf_lzma_bit (compressed, compressed_size,
                                    probs + LZMA_IS_REP (lstate),
                                    poffset, &range, &code))
                    {
                      int short_rep;
                      uint32_t next_dist;

                      /* Repeated match.  Select one of the four most
                         recent distances and rotate it to the front
                         of DIST.  */

                      short_rep = 0;
                      if (elf_lzma_bit (compressed, compressed_size,
                                        probs + LZMA_IS_REP0 (lstate),
                                        poffset, &range, &code))
                        {
                          if (elf_lzma_bit (compressed, compressed_size,
                                            probs + LZMA_IS_REP1 (lstate),
                                            poffset, &range, &code))
                            {
                              if (elf_lzma_bit (compressed, compressed_size,
                                                probs + LZMA_IS_REP2 (lstate),
                                                poffset, &range, &code))
                                {
                                  next_dist = dist[3];
                                  dist[3] = dist[2];
                                }
                              else
                                {
                                  next_dist = dist[2];
                                }
                              dist[2] = dist[1];
                            }
                          else
                            {
                              next_dist = dist[1];
                            }

                          dist[1] = dist[0];
                          dist[0] = next_dist;
                        }
                      else
                        {
                          /* rep0 reused; a clear IS_REP0_LONG bit
                             means a single-byte ("short rep")
                             match.  */
                          if (!elf_lzma_bit (compressed, compressed_size,
                                             (probs
                                              + LZMA_IS_REP0_LONG (lstate,
                                                                   pos_state)),
                                             poffset, &range, &code))
                            short_rep = 1;
                        }

                      if (lstate < 7)
                        lstate = short_rep ? 9 : 8;
                      else
                        lstate = 11;

                      if (short_rep)
                        len = 1;
                      else
                        len = elf_lzma_len (compressed, compressed_size,
                                            probs, 1, pos_state, poffset,
                                            &range, &code);
                    }
                  else
                    {
                      uint32_t dist_state;
                      uint32_t dist_slot;
                      uint16_t *probs_dist;

                      /* Match.  Read a length, then a new distance
                         which pushes the old ones down the DIST
                         history.  */

                      if (lstate < 7)
                        lstate = 7;
                      else
                        lstate = 10;
                      dist[3] = dist[2];
                      dist[2] = dist[1];
                      dist[1] = dist[0];
                      len = elf_lzma_len (compressed, compressed_size,
                                          probs, 0, pos_state, poffset,
                                          &range, &code);

                      /* The distance model is conditioned on the
                         match length, capped at 3.  */
                      if (len < 4 + 2)
                        dist_state = len - 2;
                      else
                        dist_state = 3;
                      probs_dist = probs + LZMA_DIST_SLOT (dist_state, 0);
                      dist_slot = elf_lzma_integer (compressed,
                                                    compressed_size,
                                                    probs_dist, 6,
                                                    poffset, &range,
                                                    &code);
                      if (dist_slot < LZMA_DIST_MODEL_START)
                        dist[0] = dist_slot;
                      else
                        {
                          uint32_t limit;

                          limit = (dist_slot >> 1) - 1;
                          dist[0] = 2 + (dist_slot & 1);
                          if (dist_slot < LZMA_DIST_MODEL_END)
                            {
                              /* Small distance: remaining bits come
                                 from the special reverse-coded
                                 model.  */
                              dist[0] <<= limit;
                              probs_dist = (probs
                                            + LZMA_DIST_SPECIAL(dist[0]
                                                                - dist_slot
                                                                - 1));
                              dist[0] +=
                                elf_lzma_reverse_integer (compressed,
                                                          compressed_size,
                                                          probs_dist,
                                                          limit, poffset,
                                                          &range, &code);
                            }
                          else
                            {
                              /* Large distance: middle bits are read
                                 directly from the range coder without
                                 a probability model, the low 4 bits
                                 use the align model.  */
                              uint32_t dist0;
                              uint32_t i;

                              dist0 = dist[0];
                              for (i = 0; i < limit - 4; i++)
                                {
                                  uint32_t mask;

                                  elf_lzma_range_normalize (compressed,
                                                            compressed_size,
                                                            poffset,
                                                            &range, &code);
                                  range >>= 1;
                                  code -= range;
                                  mask = -(code >> 31);
                                  code += range & mask;
                                  dist0 <<= 1;
                                  dist0 += mask + 1;
                                }
                              dist0 <<= 4;
                              probs_dist = probs + LZMA_DIST_ALIGN (0);
                              dist0 +=
                                elf_lzma_reverse_integer (compressed,
                                                          compressed_size,
                                                          probs_dist, 4,
                                                          poffset,
                                                          &range, &code);
                              dist[0] = dist0;
                            }
                        }
                    }

                  /* The match source must lie within the current
                     dictionary and the copy must fit in the output.  */
                  if (unlikely (uncompressed_offset
                                - dict_start_offset < dist[0] + 1))
                    {
                      elf_uncompress_failed ();
                      return 0;
                    }
                  if (unlikely (uncompressed_offset + len > uncompressed_size))
                    {
                      elf_uncompress_failed ();
                      return 0;
                    }

                  if (dist[0] == 0)
                    {
                      /* A common case, meaning repeat the last
                         character LEN times.  */
                      memset (uncompressed + uncompressed_offset,
                              uncompressed[uncompressed_offset - 1],
                              len);
                      uncompressed_offset += len;
                    }
                  else if (dist[0] + 1 >= len)
                    {
                      /* Source and destination do not overlap.  */
                      memcpy (uncompressed + uncompressed_offset,
                              uncompressed + uncompressed_offset - dist[0] - 1,
                              len);
                      uncompressed_offset += len;
                    }
                  else
                    {
                      /* Overlapping copy: replicate in chunks of at
                         most dist[0] + 1 bytes.  */
                      while (len > 0)
                        {
                          uint32_t copy;

                          copy = len < dist[0] + 1 ? len : dist[0] + 1;
                          memcpy (uncompressed + uncompressed_offset,
                                  (uncompressed + uncompressed_offset
                                   - dist[0] - 1),
                                  copy);
                          len -= copy;
                          uncompressed_offset += copy;
                        }
                    }
                }
              else
                {
                  unsigned char prev;
                  unsigned char low;
                  size_t high;
                  uint16_t *lit_probs;
                  unsigned int sym;

                  /* Literal value.  The literal coder is selected by
                     the top lc bits of the previous byte and the low
                     lp bits of the position.  */

                  if (uncompressed_offset > 0)
                    prev = uncompressed[uncompressed_offset - 1];
                  else
                    prev = 0;
                  low = prev >> (8 - lc);
                  high = (((uncompressed_offset - dict_start_offset)
                           & ((1 << lp) - 1))
                          << lc);
                  lit_probs = probs + LZMA_LITERAL (low + high, 0);
                  if (lstate < 7)
                    sym = elf_lzma_integer (compressed, compressed_size,
                                            lit_probs, 8, poffset, &range,
                                            &code);
                  else
                    {
                      /* After a match, decode the literal using the
                         byte at the last match distance as a
                         predictor ("matched literal").  */
                      unsigned int match;
                      unsigned int bit;
                      unsigned int match_bit;
                      unsigned int idx;

                      sym = 1;
                      if (uncompressed_offset >= dist[0] + 1)
                        match = uncompressed[uncompressed_offset - dist[0] - 1];
                      else
                        match = 0;
                      match <<= 1;
                      bit = 0x100;
                      do
                        {
                          match_bit = match & bit;
                          match <<= 1;
                          idx = bit + match_bit + sym;
                          sym <<= 1;
                          if (elf_lzma_bit (compressed, compressed_size,
                                            lit_probs + idx, poffset,
                                            &range, &code))
                            {
                              ++sym;
                              bit &= match_bit;
                            }
                          else
                            {
                              bit &= ~ match_bit;
                            }
                        }
                      while (sym < 0x100);
                    }

                  if (unlikely (uncompressed_offset >= uncompressed_size))
                    {
                      elf_uncompress_failed ();
                      return 0;
                    }

                  uncompressed[uncompressed_offset] = (unsigned char) sym;
                  ++uncompressed_offset;
                  /* Advance the state machine for a literal.  */
                  if (lstate <= 3)
                    lstate = 0;
                  else if (lstate <= 9)
                    lstate -= 3;
                  else
                    lstate -= 6;
                }
            }

          elf_lzma_range_normalize (compressed, compressed_size, poffset,
                                    &range, &code);

          off = *poffset;
        }
    }

  /* We have reached the end of the block.  Pad to four byte
     boundary.  */
  off = (off + 3) &~ (size_t) 3;
  if (unlikely (off > compressed_size))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Verify or skip the block check, as selected by CHECK from the
     stream header.  */
  switch (check)
    {
    case 0:
      /* No check.  */
      break;

    case 1:
      /* CRC32 */
      if (unlikely (off + 4 > compressed_size))
        {
          elf_uncompress_failed ();
          return 0;
        }
      computed_crc = elf_crc32 (0, uncompressed, uncompressed_offset);
      stream_crc = (compressed[off]
                    | (compressed[off + 1] << 8)
                    | (compressed[off + 2] << 16)
                    | (compressed[off + 3] << 24));
      if (computed_crc != stream_crc)
        {
          elf_uncompress_failed ();
          return 0;
        }
      off += 4;
      break;

    case 4:
      /* CRC64.  We don't bother computing a CRC64 checksum.  */
      if (unlikely (off + 8 > compressed_size))
        {
          elf_uncompress_failed ();
          return 0;
        }
      off += 8;
      break;

    case 10:
      /* SHA.  We don't bother computing a SHA checksum.  */
      if (unlikely (off + 32 > compressed_size))
        {
          elf_uncompress_failed ();
          return 0;
        }
      off += 32;
      break;

    default:
      elf_uncompress_failed ();
      return 0;
    }

  *poffset = off;

  return 1;
}

/* Uncompress LZMA data found in a minidebug file.  The minidebug
   format is described at
   https://sourceware.org/gdb/current/onlinedocs/gdb/MiniDebugInfo.html.
   Returns 0 on error, 1 on successful decompression.  For this
   function we return 0 on failure to decompress, as the calling code
   will carry on in that case.
*/ 6267 6268static int 6269elf_uncompress_lzma (struct backtrace_state *state, 6270 const unsigned char *compressed, size_t compressed_size, 6271 backtrace_error_callback error_callback, void *data, 6272 unsigned char **uncompressed, size_t *uncompressed_size) 6273{ 6274 size_t header_size; 6275 size_t footer_size; 6276 unsigned char check; 6277 uint32_t computed_crc; 6278 uint32_t stream_crc; 6279 size_t offset; 6280 size_t index_size; 6281 size_t footer_offset; 6282 size_t index_offset; 6283 uint64_t index_compressed_size; 6284 uint64_t index_uncompressed_size; 6285 unsigned char *mem; 6286 uint16_t *probs; 6287 size_t compressed_block_size; 6288 6289 /* The format starts with a stream header and ends with a stream 6290 footer. */ 6291 header_size = 12; 6292 footer_size = 12; 6293 if (unlikely (compressed_size < header_size + footer_size)) 6294 { 6295 elf_uncompress_failed (); 6296 return 0; 6297 } 6298 6299 /* The stream header starts with a magic string. */ 6300 if (unlikely (memcmp (compressed, "\375" "7zXZ\0", 6) != 0)) 6301 { 6302 elf_uncompress_failed (); 6303 return 0; 6304 } 6305 6306 /* Next come stream flags. The first byte is zero, the second byte 6307 is the check. */ 6308 if (unlikely (compressed[6] != 0)) 6309 { 6310 elf_uncompress_failed (); 6311 return 0; 6312 } 6313 check = compressed[7]; 6314 if (unlikely ((check & 0xf8) != 0)) 6315 { 6316 elf_uncompress_failed (); 6317 return 0; 6318 } 6319 6320 /* Next comes a CRC of the stream flags. */ 6321 computed_crc = elf_crc32 (0, compressed + 6, 2); 6322 stream_crc = (compressed[8] 6323 | (compressed[9] << 8) 6324 | (compressed[10] << 16) 6325 | (compressed[11] << 24)); 6326 if (unlikely (computed_crc != stream_crc)) 6327 { 6328 elf_uncompress_failed (); 6329 return 0; 6330 } 6331 6332 /* Now that we've parsed the header, parse the footer, so that we 6333 can get the uncompressed size. */ 6334 6335 /* The footer ends with two magic bytes. 
*/ 6336 6337 offset = compressed_size; 6338 if (unlikely (memcmp (compressed + offset - 2, "YZ", 2) != 0)) 6339 { 6340 elf_uncompress_failed (); 6341 return 0; 6342 } 6343 offset -= 2; 6344 6345 /* Before that are the stream flags, which should be the same as the 6346 flags in the header. */ 6347 if (unlikely (compressed[offset - 2] != 0 6348 || compressed[offset - 1] != check)) 6349 { 6350 elf_uncompress_failed (); 6351 return 0; 6352 } 6353 offset -= 2; 6354 6355 /* Before that is the size of the index field, which precedes the 6356 footer. */ 6357 index_size = (compressed[offset - 4] 6358 | (compressed[offset - 3] << 8) 6359 | (compressed[offset - 2] << 16) 6360 | (compressed[offset - 1] << 24)); 6361 index_size = (index_size + 1) * 4; 6362 offset -= 4; 6363 6364 /* Before that is a footer CRC. */ 6365 computed_crc = elf_crc32 (0, compressed + offset, 6); 6366 stream_crc = (compressed[offset - 4] 6367 | (compressed[offset - 3] << 8) 6368 | (compressed[offset - 2] << 16) 6369 | (compressed[offset - 1] << 24)); 6370 if (unlikely (computed_crc != stream_crc)) 6371 { 6372 elf_uncompress_failed (); 6373 return 0; 6374 } 6375 offset -= 4; 6376 6377 /* The index comes just before the footer. */ 6378 if (unlikely (offset < index_size + header_size)) 6379 { 6380 elf_uncompress_failed (); 6381 return 0; 6382 } 6383 6384 footer_offset = offset; 6385 offset -= index_size; 6386 index_offset = offset; 6387 6388 /* The index starts with a zero byte. */ 6389 if (unlikely (compressed[offset] != 0)) 6390 { 6391 elf_uncompress_failed (); 6392 return 0; 6393 } 6394 ++offset; 6395 6396 /* Next is the number of blocks. We expect zero blocks for an empty 6397 stream, and otherwise a single block. 
*/ 6398 if (unlikely (compressed[offset] == 0)) 6399 { 6400 *uncompressed = NULL; 6401 *uncompressed_size = 0; 6402 return 1; 6403 } 6404 if (unlikely (compressed[offset] != 1)) 6405 { 6406 elf_uncompress_failed (); 6407 return 0; 6408 } 6409 ++offset; 6410 6411 /* Next is the compressed size and the uncompressed size. */ 6412 if (!elf_lzma_varint (compressed, compressed_size, &offset, 6413 &index_compressed_size)) 6414 return 0; 6415 if (!elf_lzma_varint (compressed, compressed_size, &offset, 6416 &index_uncompressed_size)) 6417 return 0; 6418 6419 /* Pad to a four byte boundary. */ 6420 offset = (offset + 3) &~ (size_t) 3; 6421 6422 /* Next is a CRC of the index. */ 6423 computed_crc = elf_crc32 (0, compressed + index_offset, 6424 offset - index_offset); 6425 stream_crc = (compressed[offset] 6426 | (compressed[offset + 1] << 8) 6427 | (compressed[offset + 2] << 16) 6428 | (compressed[offset + 3] << 24)); 6429 if (unlikely (computed_crc != stream_crc)) 6430 { 6431 elf_uncompress_failed (); 6432 return 0; 6433 } 6434 offset += 4; 6435 6436 /* We should now be back at the footer. */ 6437 if (unlikely (offset != footer_offset)) 6438 { 6439 elf_uncompress_failed (); 6440 return 0; 6441 } 6442 6443 /* Allocate space to hold the uncompressed data. If we succeed in 6444 uncompressing the LZMA data, we never free this memory. */ 6445 mem = (unsigned char *) backtrace_alloc (state, index_uncompressed_size, 6446 error_callback, data); 6447 if (unlikely (mem == NULL)) 6448 return 0; 6449 *uncompressed = mem; 6450 *uncompressed_size = index_uncompressed_size; 6451 6452 /* Allocate space for probabilities. */ 6453 probs = ((uint16_t *) 6454 backtrace_alloc (state, 6455 LZMA_PROB_TOTAL_COUNT * sizeof (uint16_t), 6456 error_callback, data)); 6457 if (unlikely (probs == NULL)) 6458 { 6459 backtrace_free (state, mem, index_uncompressed_size, error_callback, 6460 data); 6461 return 0; 6462 } 6463 6464 /* Uncompress the block, which follows the header. 
*/ 6465 offset = 12; 6466 if (!elf_uncompress_lzma_block (compressed, compressed_size, check, probs, 6467 mem, index_uncompressed_size, &offset)) 6468 { 6469 backtrace_free (state, mem, index_uncompressed_size, error_callback, 6470 data); 6471 return 0; 6472 } 6473 6474 compressed_block_size = offset - 12; 6475 if (unlikely (compressed_block_size 6476 != ((index_compressed_size + 3) &~ (size_t) 3))) 6477 { 6478 elf_uncompress_failed (); 6479 backtrace_free (state, mem, index_uncompressed_size, error_callback, 6480 data); 6481 return 0; 6482 } 6483 6484 offset = (offset + 3) &~ (size_t) 3; 6485 if (unlikely (offset != index_offset)) 6486 { 6487 elf_uncompress_failed (); 6488 backtrace_free (state, mem, index_uncompressed_size, error_callback, 6489 data); 6490 return 0; 6491 } 6492 6493 return 1; 6494} 6495 6496/* This function is a hook for testing the LZMA support. It is only 6497 used by tests. */ 6498 6499int 6500backtrace_uncompress_lzma (struct backtrace_state *state, 6501 const unsigned char *compressed, 6502 size_t compressed_size, 6503 backtrace_error_callback error_callback, 6504 void *data, unsigned char **uncompressed, 6505 size_t *uncompressed_size) 6506{ 6507 return elf_uncompress_lzma (state, compressed, compressed_size, 6508 error_callback, data, uncompressed, 6509 uncompressed_size); 6510} 6511 6512/* Add the backtrace data for one ELF file. Returns 1 on success, 6513 0 on failure (in both cases descriptor is closed) or -1 if exe 6514 is non-zero and the ELF file is ET_DYN, which tells the caller that 6515 elf_add will need to be called on the descriptor again after 6516 base_address is determined. 
*/ 6517 6518static int 6519elf_add (struct backtrace_state *state, const char *filename, int descriptor, 6520 const unsigned char *memory, size_t memory_size, 6521 uintptr_t base_address, backtrace_error_callback error_callback, 6522 void *data, fileline *fileline_fn, int *found_sym, int *found_dwarf, 6523 struct dwarf_data **fileline_entry, int exe, int debuginfo, 6524 const char *with_buildid_data, uint32_t with_buildid_size) 6525{ 6526 struct elf_view ehdr_view; 6527 b_elf_ehdr ehdr; 6528 off_t shoff; 6529 unsigned int shnum; 6530 unsigned int shstrndx; 6531 struct elf_view shdrs_view; 6532 int shdrs_view_valid; 6533 const b_elf_shdr *shdrs; 6534 const b_elf_shdr *shstrhdr; 6535 size_t shstr_size; 6536 off_t shstr_off; 6537 struct elf_view names_view; 6538 int names_view_valid; 6539 const char *names; 6540 unsigned int symtab_shndx; 6541 unsigned int dynsym_shndx; 6542 unsigned int i; 6543 struct debug_section_info sections[DEBUG_MAX]; 6544 struct debug_section_info zsections[DEBUG_MAX]; 6545 struct elf_view symtab_view; 6546 int symtab_view_valid; 6547 struct elf_view strtab_view; 6548 int strtab_view_valid; 6549 struct elf_view buildid_view; 6550 int buildid_view_valid; 6551 const char *buildid_data; 6552 uint32_t buildid_size; 6553 struct elf_view debuglink_view; 6554 int debuglink_view_valid; 6555 const char *debuglink_name; 6556 uint32_t debuglink_crc; 6557 struct elf_view debugaltlink_view; 6558 int debugaltlink_view_valid; 6559 const char *debugaltlink_name; 6560 const char *debugaltlink_buildid_data; 6561 uint32_t debugaltlink_buildid_size; 6562 struct elf_view gnu_debugdata_view; 6563 int gnu_debugdata_view_valid; 6564 size_t gnu_debugdata_size; 6565 unsigned char *gnu_debugdata_uncompressed; 6566 size_t gnu_debugdata_uncompressed_size; 6567 off_t min_offset; 6568 off_t max_offset; 6569 off_t debug_size; 6570 struct elf_view debug_view; 6571 int debug_view_valid; 6572 unsigned int using_debug_view; 6573 uint16_t *zdebug_table; 6574 struct elf_view 
split_debug_view[DEBUG_MAX]; 6575 unsigned char split_debug_view_valid[DEBUG_MAX]; 6576 struct elf_ppc64_opd_data opd_data, *opd; 6577 struct dwarf_sections dwarf_sections; 6578 struct dwarf_data *fileline_altlink = NULL; 6579 6580 if (!debuginfo) 6581 { 6582 *found_sym = 0; 6583 *found_dwarf = 0; 6584 } 6585 6586 shdrs_view_valid = 0; 6587 names_view_valid = 0; 6588 symtab_view_valid = 0; 6589 strtab_view_valid = 0; 6590 buildid_view_valid = 0; 6591 buildid_data = NULL; 6592 buildid_size = 0; 6593 debuglink_view_valid = 0; 6594 debuglink_name = NULL; 6595 debuglink_crc = 0; 6596 debugaltlink_view_valid = 0; 6597 debugaltlink_name = NULL; 6598 debugaltlink_buildid_data = NULL; 6599 debugaltlink_buildid_size = 0; 6600 gnu_debugdata_view_valid = 0; 6601 gnu_debugdata_size = 0; 6602 debug_view_valid = 0; 6603 memset (&split_debug_view_valid[0], 0, sizeof split_debug_view_valid); 6604 opd = NULL; 6605 6606 if (!elf_get_view (state, descriptor, memory, memory_size, 0, sizeof ehdr, 6607 error_callback, data, &ehdr_view)) 6608 goto fail; 6609 6610 memcpy (&ehdr, ehdr_view.view.data, sizeof ehdr); 6611 6612 elf_release_view (state, &ehdr_view, error_callback, data); 6613 6614 if (ehdr.e_ident[EI_MAG0] != ELFMAG0 6615 || ehdr.e_ident[EI_MAG1] != ELFMAG1 6616 || ehdr.e_ident[EI_MAG2] != ELFMAG2 6617 || ehdr.e_ident[EI_MAG3] != ELFMAG3) 6618 { 6619 error_callback (data, "executable file is not ELF", 0); 6620 goto fail; 6621 } 6622 if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) 6623 { 6624 error_callback (data, "executable file is unrecognized ELF version", 0); 6625 goto fail; 6626 } 6627 6628#if BACKTRACE_ELF_SIZE == 32 6629#define BACKTRACE_ELFCLASS ELFCLASS32 6630#else 6631#define BACKTRACE_ELFCLASS ELFCLASS64 6632#endif 6633 6634 if (ehdr.e_ident[EI_CLASS] != BACKTRACE_ELFCLASS) 6635 { 6636 error_callback (data, "executable file is unexpected ELF class", 0); 6637 goto fail; 6638 } 6639 6640 if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB 6641 && ehdr.e_ident[EI_DATA] != ELFDATA2MSB) 
6642 { 6643 error_callback (data, "executable file has unknown endianness", 0); 6644 goto fail; 6645 } 6646 6647 /* If the executable is ET_DYN, it is either a PIE, or we are running 6648 directly a shared library with .interp. We need to wait for 6649 dl_iterate_phdr in that case to determine the actual base_address. */ 6650 if (exe && ehdr.e_type == ET_DYN) 6651 return -1; 6652 6653 shoff = ehdr.e_shoff; 6654 shnum = ehdr.e_shnum; 6655 shstrndx = ehdr.e_shstrndx; 6656 6657 if ((shnum == 0 || shstrndx == SHN_XINDEX) 6658 && shoff != 0) 6659 { 6660 struct elf_view shdr_view; 6661 const b_elf_shdr *shdr; 6662 6663 if (!elf_get_view (state, descriptor, memory, memory_size, shoff, 6664 sizeof shdr, error_callback, data, &shdr_view)) 6665 goto fail; 6666 6667 shdr = (const b_elf_shdr *) shdr_view.view.data; 6668 6669 if (shnum == 0) 6670 shnum = shdr->sh_size; 6671 6672 if (shstrndx == SHN_XINDEX) 6673 { 6674 shstrndx = shdr->sh_link; 6675 6676 /* Versions of the GNU binutils between 2.12 and 2.18 did 6677 not handle objects with more than SHN_LORESERVE sections 6678 correctly. All large section indexes were offset by 6679 0x100. There is more information at 6680 http://sourceware.org/bugzilla/show_bug.cgi?id-5900 . 6681 Fortunately these object files are easy to detect, as the 6682 GNU binutils always put the section header string table 6683 near the end of the list of sections. Thus if the 6684 section header string table index is larger than the 6685 number of sections, then we know we have to subtract 6686 0x100 to get the real section index. */ 6687 if (shstrndx >= shnum && shstrndx >= SHN_LORESERVE + 0x100) 6688 shstrndx -= 0x100; 6689 } 6690 6691 elf_release_view (state, &shdr_view, error_callback, data); 6692 } 6693 6694 if (shnum == 0 || shstrndx == 0) 6695 goto fail; 6696 6697 /* To translate PC to file/line when using DWARF, we need to find 6698 the .debug_info and .debug_line sections. */ 6699 6700 /* Read the section headers, skipping the first one. 
*/ 6701 6702 if (!elf_get_view (state, descriptor, memory, memory_size, 6703 shoff + sizeof (b_elf_shdr), 6704 (shnum - 1) * sizeof (b_elf_shdr), 6705 error_callback, data, &shdrs_view)) 6706 goto fail; 6707 shdrs_view_valid = 1; 6708 shdrs = (const b_elf_shdr *) shdrs_view.view.data; 6709 6710 /* Read the section names. */ 6711 6712 shstrhdr = &shdrs[shstrndx - 1]; 6713 shstr_size = shstrhdr->sh_size; 6714 shstr_off = shstrhdr->sh_offset; 6715 6716 if (!elf_get_view (state, descriptor, memory, memory_size, shstr_off, 6717 shstrhdr->sh_size, error_callback, data, &names_view)) 6718 goto fail; 6719 names_view_valid = 1; 6720 names = (const char *) names_view.view.data; 6721 6722 symtab_shndx = 0; 6723 dynsym_shndx = 0; 6724 6725 memset (sections, 0, sizeof sections); 6726 memset (zsections, 0, sizeof zsections); 6727 6728 /* Look for the symbol table. */ 6729 for (i = 1; i < shnum; ++i) 6730 { 6731 const b_elf_shdr *shdr; 6732 unsigned int sh_name; 6733 const char *name; 6734 int j; 6735 6736 shdr = &shdrs[i - 1]; 6737 6738 if (shdr->sh_type == SHT_SYMTAB) 6739 symtab_shndx = i; 6740 else if (shdr->sh_type == SHT_DYNSYM) 6741 dynsym_shndx = i; 6742 6743 sh_name = shdr->sh_name; 6744 if (sh_name >= shstr_size) 6745 { 6746 error_callback (data, "ELF section name out of range", 0); 6747 goto fail; 6748 } 6749 6750 name = names + sh_name; 6751 6752 for (j = 0; j < (int) DEBUG_MAX; ++j) 6753 { 6754 if (strcmp (name, dwarf_section_names[j]) == 0) 6755 { 6756 sections[j].offset = shdr->sh_offset; 6757 sections[j].size = shdr->sh_size; 6758 sections[j].compressed = (shdr->sh_flags & SHF_COMPRESSED) != 0; 6759 break; 6760 } 6761 } 6762 6763 if (name[0] == '.' && name[1] == 'z') 6764 { 6765 for (j = 0; j < (int) DEBUG_MAX; ++j) 6766 { 6767 if (strcmp (name + 2, dwarf_section_names[j] + 1) == 0) 6768 { 6769 zsections[j].offset = shdr->sh_offset; 6770 zsections[j].size = shdr->sh_size; 6771 break; 6772 } 6773 } 6774 } 6775 6776 /* Read the build ID if present. 
This could check for any 6777 SHT_NOTE section with the right note name and type, but gdb 6778 looks for a specific section name. */ 6779 if ((!debuginfo || with_buildid_data != NULL) 6780 && !buildid_view_valid 6781 && strcmp (name, ".note.gnu.build-id") == 0) 6782 { 6783 const b_elf_note *note; 6784 6785 if (!elf_get_view (state, descriptor, memory, memory_size, 6786 shdr->sh_offset, shdr->sh_size, error_callback, 6787 data, &buildid_view)) 6788 goto fail; 6789 6790 buildid_view_valid = 1; 6791 note = (const b_elf_note *) buildid_view.view.data; 6792 if (note->type == NT_GNU_BUILD_ID 6793 && note->namesz == 4 6794 && strncmp (note->name, "GNU", 4) == 0 6795 && shdr->sh_size <= 12 + ((note->namesz + 3) & ~ 3) + note->descsz) 6796 { 6797 buildid_data = &note->name[0] + ((note->namesz + 3) & ~ 3); 6798 buildid_size = note->descsz; 6799 } 6800 6801 if (with_buildid_size != 0) 6802 { 6803 if (buildid_size != with_buildid_size) 6804 goto fail; 6805 6806 if (memcmp (buildid_data, with_buildid_data, buildid_size) != 0) 6807 goto fail; 6808 } 6809 } 6810 6811 /* Read the debuglink file if present. 
*/ 6812 if (!debuginfo 6813 && !debuglink_view_valid 6814 && strcmp (name, ".gnu_debuglink") == 0) 6815 { 6816 const char *debuglink_data; 6817 size_t crc_offset; 6818 6819 if (!elf_get_view (state, descriptor, memory, memory_size, 6820 shdr->sh_offset, shdr->sh_size, error_callback, 6821 data, &debuglink_view)) 6822 goto fail; 6823 6824 debuglink_view_valid = 1; 6825 debuglink_data = (const char *) debuglink_view.view.data; 6826 crc_offset = strnlen (debuglink_data, shdr->sh_size); 6827 crc_offset = (crc_offset + 3) & ~3; 6828 if (crc_offset + 4 <= shdr->sh_size) 6829 { 6830 debuglink_name = debuglink_data; 6831 debuglink_crc = *(const uint32_t*)(debuglink_data + crc_offset); 6832 } 6833 } 6834 6835 if (!debugaltlink_view_valid 6836 && strcmp (name, ".gnu_debugaltlink") == 0) 6837 { 6838 const char *debugaltlink_data; 6839 size_t debugaltlink_name_len; 6840 6841 if (!elf_get_view (state, descriptor, memory, memory_size, 6842 shdr->sh_offset, shdr->sh_size, error_callback, 6843 data, &debugaltlink_view)) 6844 goto fail; 6845 6846 debugaltlink_view_valid = 1; 6847 debugaltlink_data = (const char *) debugaltlink_view.view.data; 6848 debugaltlink_name = debugaltlink_data; 6849 debugaltlink_name_len = strnlen (debugaltlink_data, shdr->sh_size); 6850 if (debugaltlink_name_len < shdr->sh_size) 6851 { 6852 /* Include terminating zero. */ 6853 debugaltlink_name_len += 1; 6854 6855 debugaltlink_buildid_data 6856 = debugaltlink_data + debugaltlink_name_len; 6857 debugaltlink_buildid_size = shdr->sh_size - debugaltlink_name_len; 6858 } 6859 } 6860 6861 if (!gnu_debugdata_view_valid 6862 && strcmp (name, ".gnu_debugdata") == 0) 6863 { 6864 if (!elf_get_view (state, descriptor, memory, memory_size, 6865 shdr->sh_offset, shdr->sh_size, error_callback, 6866 data, &gnu_debugdata_view)) 6867 goto fail; 6868 6869 gnu_debugdata_size = shdr->sh_size; 6870 gnu_debugdata_view_valid = 1; 6871 } 6872 6873 /* Read the .opd section on PowerPC64 ELFv1. 
*/ 6874 if (ehdr.e_machine == EM_PPC64 6875 && (ehdr.e_flags & EF_PPC64_ABI) < 2 6876 && shdr->sh_type == SHT_PROGBITS 6877 && strcmp (name, ".opd") == 0) 6878 { 6879 if (!elf_get_view (state, descriptor, memory, memory_size, 6880 shdr->sh_offset, shdr->sh_size, error_callback, 6881 data, &opd_data.view)) 6882 goto fail; 6883 6884 opd = &opd_data; 6885 opd->addr = shdr->sh_addr; 6886 opd->data = (const char *) opd_data.view.view.data; 6887 opd->size = shdr->sh_size; 6888 } 6889 } 6890 6891 if (symtab_shndx == 0) 6892 symtab_shndx = dynsym_shndx; 6893 if (symtab_shndx != 0 && !debuginfo) 6894 { 6895 const b_elf_shdr *symtab_shdr; 6896 unsigned int strtab_shndx; 6897 const b_elf_shdr *strtab_shdr; 6898 struct elf_syminfo_data *sdata; 6899 6900 symtab_shdr = &shdrs[symtab_shndx - 1]; 6901 strtab_shndx = symtab_shdr->sh_link; 6902 if (strtab_shndx >= shnum) 6903 { 6904 error_callback (data, 6905 "ELF symbol table strtab link out of range", 0); 6906 goto fail; 6907 } 6908 strtab_shdr = &shdrs[strtab_shndx - 1]; 6909 6910 if (!elf_get_view (state, descriptor, memory, memory_size, 6911 symtab_shdr->sh_offset, symtab_shdr->sh_size, 6912 error_callback, data, &symtab_view)) 6913 goto fail; 6914 symtab_view_valid = 1; 6915 6916 if (!elf_get_view (state, descriptor, memory, memory_size, 6917 strtab_shdr->sh_offset, strtab_shdr->sh_size, 6918 error_callback, data, &strtab_view)) 6919 goto fail; 6920 strtab_view_valid = 1; 6921 6922 sdata = ((struct elf_syminfo_data *) 6923 backtrace_alloc (state, sizeof *sdata, error_callback, data)); 6924 if (sdata == NULL) 6925 goto fail; 6926 6927 if (!elf_initialize_syminfo (state, base_address, 6928 (const unsigned char*)symtab_view.view.data, symtab_shdr->sh_size, 6929 (const unsigned char*)strtab_view.view.data, strtab_shdr->sh_size, 6930 error_callback, data, sdata, opd)) 6931 { 6932 backtrace_free (state, sdata, sizeof *sdata, error_callback, data); 6933 goto fail; 6934 } 6935 6936 /* We no longer need the symbol table, but we hold on 
to the 6937 string table permanently. */ 6938 elf_release_view (state, &symtab_view, error_callback, data); 6939 symtab_view_valid = 0; 6940 strtab_view_valid = 0; 6941 6942 *found_sym = 1; 6943 6944 elf_add_syminfo_data (state, sdata); 6945 } 6946 6947 elf_release_view (state, &shdrs_view, error_callback, data); 6948 shdrs_view_valid = 0; 6949 elf_release_view (state, &names_view, error_callback, data); 6950 names_view_valid = 0; 6951 6952 /* If the debug info is in a separate file, read that one instead. */ 6953 6954 if (buildid_data != NULL) 6955 { 6956 int d; 6957 6958 d = elf_open_debugfile_by_buildid (state, buildid_data, buildid_size, 6959 filename, error_callback, data); 6960 if (d >= 0) 6961 { 6962 int ret; 6963 6964 elf_release_view (state, &buildid_view, error_callback, data); 6965 if (debuglink_view_valid) 6966 elf_release_view (state, &debuglink_view, error_callback, data); 6967 if (debugaltlink_view_valid) 6968 elf_release_view (state, &debugaltlink_view, error_callback, data); 6969 ret = elf_add (state, "", d, NULL, 0, base_address, error_callback, 6970 data, fileline_fn, found_sym, found_dwarf, NULL, 0, 6971 1, NULL, 0); 6972 if (ret < 0) 6973 backtrace_close (d, error_callback, data); 6974 else if (descriptor >= 0) 6975 backtrace_close (descriptor, error_callback, data); 6976 return ret; 6977 } 6978 } 6979 6980 if (buildid_view_valid) 6981 { 6982 elf_release_view (state, &buildid_view, error_callback, data); 6983 buildid_view_valid = 0; 6984 } 6985 6986 if (opd) 6987 { 6988 elf_release_view (state, &opd->view, error_callback, data); 6989 opd = NULL; 6990 } 6991 6992 if (debuglink_name != NULL) 6993 { 6994 int d; 6995 6996 d = elf_open_debugfile_by_debuglink (state, filename, debuglink_name, 6997 debuglink_crc, error_callback, 6998 data); 6999 if (d >= 0) 7000 { 7001 int ret; 7002 7003 elf_release_view (state, &debuglink_view, error_callback, data); 7004 if (debugaltlink_view_valid) 7005 elf_release_view (state, &debugaltlink_view, error_callback, 
data); 7006 ret = elf_add (state, "", d, NULL, 0, base_address, error_callback, 7007 data, fileline_fn, found_sym, found_dwarf, NULL, 0, 7008 1, NULL, 0); 7009 if (ret < 0) 7010 backtrace_close (d, error_callback, data); 7011 else if (descriptor >= 0) 7012 backtrace_close(descriptor, error_callback, data); 7013 return ret; 7014 } 7015 } 7016 7017 if (debuglink_view_valid) 7018 { 7019 elf_release_view (state, &debuglink_view, error_callback, data); 7020 debuglink_view_valid = 0; 7021 } 7022 7023 if (debugaltlink_name != NULL) 7024 { 7025 int d; 7026 7027 d = elf_open_debugfile_by_debuglink (state, filename, debugaltlink_name, 7028 0, error_callback, data); 7029 if (d >= 0) 7030 { 7031 int ret; 7032 7033 ret = elf_add (state, filename, d, NULL, 0, base_address, 7034 error_callback, data, fileline_fn, found_sym, 7035 found_dwarf, &fileline_altlink, 0, 1, 7036 debugaltlink_buildid_data, debugaltlink_buildid_size); 7037 elf_release_view (state, &debugaltlink_view, error_callback, data); 7038 debugaltlink_view_valid = 0; 7039 if (ret < 0) 7040 { 7041 backtrace_close (d, error_callback, data); 7042 return ret; 7043 } 7044 } 7045 } 7046 7047 if (debugaltlink_view_valid) 7048 { 7049 elf_release_view (state, &debugaltlink_view, error_callback, data); 7050 debugaltlink_view_valid = 0; 7051 } 7052 7053 if (gnu_debugdata_view_valid) 7054 { 7055 int ret; 7056 7057 ret = elf_uncompress_lzma (state, 7058 ((const unsigned char *) 7059 gnu_debugdata_view.view.data), 7060 gnu_debugdata_size, error_callback, data, 7061 &gnu_debugdata_uncompressed, 7062 &gnu_debugdata_uncompressed_size); 7063 7064 elf_release_view (state, &gnu_debugdata_view, error_callback, data); 7065 gnu_debugdata_view_valid = 0; 7066 7067 if (ret) 7068 { 7069 ret = elf_add (state, filename, -1, gnu_debugdata_uncompressed, 7070 gnu_debugdata_uncompressed_size, base_address, 7071 error_callback, data, fileline_fn, found_sym, 7072 found_dwarf, NULL, 0, 0, NULL, 0); 7073 if (ret >= 0 && descriptor >= 0) 7074 
backtrace_close(descriptor, error_callback, data); 7075 return ret; 7076 } 7077 } 7078 7079 /* Read all the debug sections in a single view, since they are 7080 probably adjacent in the file. If any of sections are 7081 uncompressed, we never release this view. */ 7082 7083 min_offset = 0; 7084 max_offset = 0; 7085 debug_size = 0; 7086 for (i = 0; i < (int) DEBUG_MAX; ++i) 7087 { 7088 off_t end; 7089 7090 if (sections[i].size != 0) 7091 { 7092 if (min_offset == 0 || sections[i].offset < min_offset) 7093 min_offset = sections[i].offset; 7094 end = sections[i].offset + sections[i].size; 7095 if (end > max_offset) 7096 max_offset = end; 7097 debug_size += sections[i].size; 7098 } 7099 if (zsections[i].size != 0) 7100 { 7101 if (min_offset == 0 || zsections[i].offset < min_offset) 7102 min_offset = zsections[i].offset; 7103 end = zsections[i].offset + zsections[i].size; 7104 if (end > max_offset) 7105 max_offset = end; 7106 debug_size += zsections[i].size; 7107 } 7108 } 7109 if (min_offset == 0 || max_offset == 0) 7110 { 7111 if (descriptor >= 0) 7112 { 7113 if (!backtrace_close (descriptor, error_callback, data)) 7114 goto fail; 7115 } 7116 return 1; 7117 } 7118 7119 /* If the total debug section size is large, assume that there are 7120 gaps between the sections, and read them individually. 
*/ 7121 7122 if (max_offset - min_offset < 0x20000000 7123 || max_offset - min_offset < debug_size + 0x10000) 7124 { 7125 if (!elf_get_view (state, descriptor, memory, memory_size, min_offset, 7126 max_offset - min_offset, error_callback, data, 7127 &debug_view)) 7128 goto fail; 7129 debug_view_valid = 1; 7130 } 7131 else 7132 { 7133 memset (&split_debug_view[0], 0, sizeof split_debug_view); 7134 for (i = 0; i < (int) DEBUG_MAX; ++i) 7135 { 7136 struct debug_section_info *dsec; 7137 7138 if (sections[i].size != 0) 7139 dsec = &sections[i]; 7140 else if (zsections[i].size != 0) 7141 dsec = &zsections[i]; 7142 else 7143 continue; 7144 7145 if (!elf_get_view (state, descriptor, memory, memory_size, 7146 dsec->offset, dsec->size, error_callback, data, 7147 &split_debug_view[i])) 7148 goto fail; 7149 split_debug_view_valid[i] = 1; 7150 7151 if (sections[i].size != 0) 7152 sections[i].data = ((const unsigned char *) 7153 split_debug_view[i].view.data); 7154 else 7155 zsections[i].data = ((const unsigned char *) 7156 split_debug_view[i].view.data); 7157 } 7158 } 7159 7160 /* We've read all we need from the executable. */ 7161 if (descriptor >= 0) 7162 { 7163 if (!backtrace_close (descriptor, error_callback, data)) 7164 goto fail; 7165 descriptor = -1; 7166 } 7167 7168 using_debug_view = 0; 7169 if (debug_view_valid) 7170 { 7171 for (i = 0; i < (int) DEBUG_MAX; ++i) 7172 { 7173 if (sections[i].size == 0) 7174 sections[i].data = NULL; 7175 else 7176 { 7177 sections[i].data = ((const unsigned char *) debug_view.view.data 7178 + (sections[i].offset - min_offset)); 7179 ++using_debug_view; 7180 } 7181 7182 if (zsections[i].size == 0) 7183 zsections[i].data = NULL; 7184 else 7185 zsections[i].data = ((const unsigned char *) debug_view.view.data 7186 + (zsections[i].offset - min_offset)); 7187 } 7188 } 7189 7190 /* Uncompress the old format (--compress-debug-sections=zlib-gnu). 
*/ 7191 7192 zdebug_table = NULL; 7193 for (i = 0; i < (int) DEBUG_MAX; ++i) 7194 { 7195 if (sections[i].size == 0 && zsections[i].size > 0) 7196 { 7197 unsigned char *uncompressed_data; 7198 size_t uncompressed_size; 7199 7200 if (zdebug_table == NULL) 7201 { 7202 zdebug_table = ((uint16_t *) 7203 backtrace_alloc (state, ZLIB_TABLE_SIZE, 7204 error_callback, data)); 7205 if (zdebug_table == NULL) 7206 goto fail; 7207 } 7208 7209 uncompressed_data = NULL; 7210 uncompressed_size = 0; 7211 if (!elf_uncompress_zdebug (state, zsections[i].data, 7212 zsections[i].size, zdebug_table, 7213 error_callback, data, 7214 &uncompressed_data, &uncompressed_size)) 7215 goto fail; 7216 sections[i].data = uncompressed_data; 7217 sections[i].size = uncompressed_size; 7218 sections[i].compressed = 0; 7219 7220 if (split_debug_view_valid[i]) 7221 { 7222 elf_release_view (state, &split_debug_view[i], 7223 error_callback, data); 7224 split_debug_view_valid[i] = 0; 7225 } 7226 } 7227 } 7228 7229 if (zdebug_table != NULL) 7230 { 7231 backtrace_free (state, zdebug_table, ZLIB_TABLE_SIZE, 7232 error_callback, data); 7233 zdebug_table = NULL; 7234 } 7235 7236 /* Uncompress the official ELF format 7237 (--compress-debug-sections=zlib-gabi, --compress-debug-sections=zstd). 
*/ 7238 for (i = 0; i < (int) DEBUG_MAX; ++i) 7239 { 7240 unsigned char *uncompressed_data; 7241 size_t uncompressed_size; 7242 7243 if (sections[i].size == 0 || !sections[i].compressed) 7244 continue; 7245 7246 if (zdebug_table == NULL) 7247 { 7248 zdebug_table = ((uint16_t *) 7249 backtrace_alloc (state, ZDEBUG_TABLE_SIZE, 7250 error_callback, data)); 7251 if (zdebug_table == NULL) 7252 goto fail; 7253 } 7254 7255 uncompressed_data = NULL; 7256 uncompressed_size = 0; 7257 if (!elf_uncompress_chdr (state, sections[i].data, sections[i].size, 7258 zdebug_table, error_callback, data, 7259 &uncompressed_data, &uncompressed_size)) 7260 goto fail; 7261 sections[i].data = uncompressed_data; 7262 sections[i].size = uncompressed_size; 7263 sections[i].compressed = 0; 7264 7265 if (debug_view_valid) 7266 --using_debug_view; 7267 else if (split_debug_view_valid[i]) 7268 { 7269 elf_release_view (state, &split_debug_view[i], error_callback, data); 7270 split_debug_view_valid[i] = 0; 7271 } 7272 } 7273 7274 if (zdebug_table != NULL) 7275 backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE, 7276 error_callback, data); 7277 7278 if (debug_view_valid && using_debug_view == 0) 7279 { 7280 elf_release_view (state, &debug_view, error_callback, data); 7281 debug_view_valid = 0; 7282 } 7283 7284 for (i = 0; i < (int) DEBUG_MAX; ++i) 7285 { 7286 dwarf_sections.data[i] = sections[i].data; 7287 dwarf_sections.size[i] = sections[i].size; 7288 } 7289 7290 if (!backtrace_dwarf_add (state, base_address, &dwarf_sections, 7291 ehdr.e_ident[EI_DATA] == ELFDATA2MSB, 7292 fileline_altlink, 7293 error_callback, data, fileline_fn, 7294 fileline_entry)) 7295 goto fail; 7296 7297 *found_dwarf = 1; 7298 7299 return 1; 7300 7301 fail: 7302 if (shdrs_view_valid) 7303 elf_release_view (state, &shdrs_view, error_callback, data); 7304 if (names_view_valid) 7305 elf_release_view (state, &names_view, error_callback, data); 7306 if (symtab_view_valid) 7307 elf_release_view (state, &symtab_view, 
error_callback, data); 7308 if (strtab_view_valid) 7309 elf_release_view (state, &strtab_view, error_callback, data); 7310 if (debuglink_view_valid) 7311 elf_release_view (state, &debuglink_view, error_callback, data); 7312 if (debugaltlink_view_valid) 7313 elf_release_view (state, &debugaltlink_view, error_callback, data); 7314 if (gnu_debugdata_view_valid) 7315 elf_release_view (state, &gnu_debugdata_view, error_callback, data); 7316 if (buildid_view_valid) 7317 elf_release_view (state, &buildid_view, error_callback, data); 7318 if (debug_view_valid) 7319 elf_release_view (state, &debug_view, error_callback, data); 7320 for (i = 0; i < (int) DEBUG_MAX; ++i) 7321 { 7322 if (split_debug_view_valid[i]) 7323 elf_release_view (state, &split_debug_view[i], error_callback, data); 7324 } 7325 if (opd) 7326 elf_release_view (state, &opd->view, error_callback, data); 7327 if (descriptor >= 0) 7328 backtrace_close (descriptor, error_callback, data); 7329 return 0; 7330} 7331 7332/* Data passed to phdr_callback. */ 7333 7334struct phdr_data 7335{ 7336 struct backtrace_state *state; 7337 backtrace_error_callback error_callback; 7338 void *data; 7339 fileline *fileline_fn; 7340 int *found_sym; 7341 int *found_dwarf; 7342 const char *exe_filename; 7343 int exe_descriptor; 7344}; 7345 7346/* Callback passed to dl_iterate_phdr. Load debug info from shared 7347 libraries. 
*/

/* Snapshot of the two dl_phdr_info fields this file needs: the module
   name (heap copy) and the module load address.  */
struct PhdrIterate
{
  char* dlpi_name;      /* tracy_malloc'd copy of dlpi_name, or nullptr.  */
  ElfW(Addr) dlpi_addr; /* Module base address (dl_phdr_info::dlpi_addr).  */
};
/* Entries recorded by phdr_callback_mock during dl_iterate_phdr and
   consumed (then cleared) by backtrace_initialize.  Shared global;
   backtrace_initialize asserts it is empty before filling it.  */
FastVector<PhdrIterate> s_phdrData(16);

/* dl_iterate_phdr callback that only snapshots each module's name and
   load address into s_phdrData; the real debug-info loading happens
   later in phdr_callback, after dl_iterate_phdr has returned.
   NOTE(review): presumably deferred so elf_add's allocations and file
   I/O do not run while the dynamic loader holds its internal lock --
   confirm.  Always returns 0 so iteration visits every module.  */
static int
phdr_callback_mock (struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED,
                    void *pdata)
{
  auto ptr = s_phdrData.push_next();
  if (info->dlpi_name)
  {
    /* Copy the name including the terminating NUL; released by
       backtrace_initialize via tracy_free.  The tracy_malloc result is
       not checked here.  */
    size_t sz = strlen (info->dlpi_name) + 1;
    ptr->dlpi_name = (char*)tracy_malloc (sz);
    memcpy (ptr->dlpi_name, info->dlpi_name, sz);
  }
  else ptr->dlpi_name = nullptr;
  ptr->dlpi_addr = info->dlpi_addr;
  return 0;
}

/* Load debug info for one module previously recorded in s_phdrData.
   PDATA points at the struct phdr_data set up by backtrace_initialize.
   Always returns 0 so the caller's loop continues with the remaining
   modules even if this one fails.  */
static int
#ifdef __i386__
/* Re-align the stack on entry; the caller's alignment is not trusted
   on i386.  */
__attribute__ ((__force_align_arg_pointer__))
#endif
phdr_callback (struct PhdrIterate *info, void *pdata)
{
  struct phdr_data *pd = (struct phdr_data *) pdata;
  const char *filename;
  int descriptor;
  int does_not_exist;
  fileline elf_fileline_fn;
  int found_dwarf;

  /* There is not much we can do if we don't have the module name,
     unless executable is ET_DYN, where we expect the very first
     phdr_callback to be for the PIE.  */
  if (info->dlpi_name == NULL || info->dlpi_name[0] == '\0')
    {
      /* Empty name: treat as the main executable.  Take ownership of
	 the descriptor left in pd->exe_descriptor and mark it consumed
	 so it is used at most once.  */
      if (pd->exe_descriptor == -1)
	return 0;
      filename = pd->exe_filename;
      descriptor = pd->exe_descriptor;
      pd->exe_descriptor = -1;
    }
  else
    {
      /* A named module: the executable's own descriptor, if still
	 pending, will not be needed -- close it.  */
      if (pd->exe_descriptor != -1)
	{
	  backtrace_close (pd->exe_descriptor, pd->error_callback, pd->data);
	  pd->exe_descriptor = -1;
	}

      filename = info->dlpi_name;
      /* does_not_exist is written by backtrace_open but not inspected
	 here; a module that cannot be opened is simply skipped.  */
      descriptor = backtrace_open (info->dlpi_name, pd->error_callback,
				   pd->data, &does_not_exist);
      if (descriptor < 0)
	return 0;
    }

  /* elf_add takes ownership of DESCRIPTOR.  Failures are ignored so
     the remaining modules are still processed.  */
  if (elf_add (pd->state, filename, descriptor, NULL, 0, info->dlpi_addr,
	       pd->error_callback, pd->data, &elf_fileline_fn, pd->found_sym,
	       &found_dwarf, NULL, 0, 0, NULL, 0))
    {
      if (found_dwarf)
	{
	  *pd->found_dwarf = 1;
	  *pd->fileline_fn = elf_fileline_fn;
	}
    }

  return 0;
}

/* Initialize the backtrace data we need from an ELF executable.  At
   the ELF level, all we need to do is find the debug info
   sections.  Returns 1 on success, 0 on failure; on success
   *FILELINE_FN is set to the file/line lookup function to use.  */

int
backtrace_initialize (struct backtrace_state *state, const char *filename,
		      int descriptor, backtrace_error_callback error_callback,
		      void *data, fileline *fileline_fn)
{
  int ret;
  int found_sym;
  int found_dwarf;
  fileline elf_fileline_fn = elf_nodebug;
  struct phdr_data pd;

  ret = elf_add (state, filename, descriptor, NULL, 0, 0, error_callback, data,
		 &elf_fileline_fn, &found_sym, &found_dwarf, NULL, 1, 0, NULL,
		 0);
  if (!ret)
    return 0;

  pd.state = state;
  pd.error_callback = error_callback;
  pd.data = data;
  pd.fileline_fn = &elf_fileline_fn;
  pd.found_sym = &found_sym;
  pd.found_dwarf = &found_dwarf;
  pd.exe_filename = filename;
  /* NOTE(review): a negative ret appears to mean elf_add did not
     consume DESCRIPTOR (e.g. a PIE whose debug info must come via
     dl_iterate_phdr); in that case hand the descriptor to
     phdr_callback through pd.exe_descriptor -- confirm against
     elf_add's return convention.  */
  pd.exe_descriptor = ret < 0 ? descriptor : -1;

  /* Snapshot the loaded modules, then process the snapshots outside of
     dl_iterate_phdr and release the name copies made by
     phdr_callback_mock.  */
  assert (s_phdrData.empty());
  dl_iterate_phdr (phdr_callback_mock, nullptr);
  for (auto& v : s_phdrData)
  {
    phdr_callback (&v, (void *) &pd);
    tracy_free (v.dlpi_name);
  }
  s_phdrData.clear();

  /* Publish the symbol lookup function.  In threaded mode use an
     atomic store / compare-and-swap so concurrent initializers never
     tear or downgrade the pointer.  */
  if (!state->threaded)
    {
      if (found_sym)
	state->syminfo_fn = elf_syminfo;
      else if (state->syminfo_fn == NULL)
	state->syminfo_fn = elf_nosyms;
    }
  else
    {
      if (found_sym)
	backtrace_atomic_store_pointer (&state->syminfo_fn, &elf_syminfo);
      else
	(void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL,
					     elf_nosyms);
    }

  if (!state->threaded)
    *fileline_fn = state->fileline_fn;
  else
    *fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn);

  /* Fall back to whatever elf_add found (possibly elf_nodebug) if no
     better file/line function has been installed yet.  */
  if (*fileline_fn == NULL || *fileline_fn == elf_nodebug)
    *fileline_fn = elf_fileline_fn;

  return 1;
}

} // namespace tracy